feat: re-install vm
Some checks failed
continuous-integration/drone/push Build is failing

closes #10
This commit is contained in:
2025-03-20 12:30:34 +00:00
parent 6b12a9bddb
commit b190fcdd1c
5 changed files with 139 additions and 33 deletions

View File

@ -48,6 +48,7 @@ pub fn routes() -> Vec<Route> {
v1_start_vm,
v1_stop_vm,
v1_restart_vm,
v1_reinstall_vm,
v1_patch_vm,
v1_time_series,
v1_custom_template_calc,
@ -592,6 +593,32 @@ async fn v1_restart_vm(
ApiData::ok(())
}
/// Re-install a VM
#[openapi(tag = "VM")]
#[patch("/api/v1/vm/<id>/re-install")]
async fn v1_reinstall_vm(
    auth: Nip98Auth,
    db: &State<Arc<dyn LNVpsDb>>,
    settings: &State<Settings>,
    worker: &State<UnboundedSender<WorkJob>>,
    id: u64,
) -> ApiResult<()> {
    // Resolve the calling user from the signed auth event's pubkey.
    let caller_pubkey = auth.event.pubkey.to_bytes();
    let caller_id = db.upsert_user(&caller_pubkey).await?;

    // Only the VM's owner may trigger a re-install.
    let vm = db.get_vm(id).await?;
    if vm.user_id != caller_id {
        return ApiData::err("VM does not belong to you");
    }

    // Look up the host the VM lives on and dispatch the re-install there.
    let host = db.get_host(vm.host_id).await?;
    let host_client = get_host_client(&host, &settings.provisioner)?;
    let vm_config = FullVmInfo::load(vm.id, (*db).clone()).await?;
    host_client.reinstall_vm(&vm_config).await?;

    // Queue a state check so the VM's status is refreshed after the operation.
    worker.send(WorkJob::CheckVm { vm_id: id })?;
    ApiData::ok(())
}
#[openapi(tag = "VM")]
#[get("/api/v1/vm/<id>/time-series")]
async fn v1_time_series(

View File

@ -20,6 +20,7 @@ use std::net::{IpAddr, SocketAddr};
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use rocket::http::Method;
#[derive(Parser)]
#[clap(about, version, author)]
@ -159,7 +160,6 @@ async fn main() -> Result<(), Error> {
config.port = ip.port();
if let Err(e) = rocket::Rocket::custom(config)
.attach(CORS)
.manage(db.clone())
.manage(provisioner.clone())
.manage(status.clone())
@ -174,6 +174,15 @@ async fn main() -> Result<(), Error> {
..Default::default()
}),
)
.attach(CORS)
.mount("/", vec![
rocket::Route::ranked(
isize::MAX,
Method::Options,
"/<catch_all_options_route..>",
CORS,
)
])
.launch()
.await
{

View File

@ -1,8 +1,9 @@
use rocket::fairing::{Fairing, Info, Kind};
use rocket::http::{Header, Method, Status};
use rocket::{Request, Response};
use std::io::Cursor;
use rocket::http::Header;
use rocket::route::{Handler, Outcome};
use rocket::{Data, Request, Response};
#[derive(Clone)]
pub struct CORS;
#[rocket::async_trait]
@ -14,7 +15,7 @@ impl Fairing for CORS {
}
}
async fn on_response<'r>(&self, req: &'r Request<'_>, response: &mut Response<'r>) {
async fn on_response<'r>(&self, _req: &'r Request<'_>, response: &mut Response<'r>) {
response.set_header(Header::new("Access-Control-Allow-Origin", "*"));
response.set_header(Header::new(
"Access-Control-Allow-Methods",
@ -22,11 +23,12 @@ impl Fairing for CORS {
));
response.set_header(Header::new("Access-Control-Allow-Headers", "*"));
response.set_header(Header::new("Access-Control-Allow-Credentials", "true"));
// force status 200 for options requests
if req.method() == Method::Options {
response.set_status(Status::Ok);
response.set_sized_body(None, Cursor::new(""))
}
}
}
#[rocket::async_trait]
impl Handler for CORS {
    /// Catch-all route handler used for CORS preflight (OPTIONS) requests.
    ///
    /// Replies with an empty successful response; the CORS headers themselves
    /// are attached globally by the `Fairing` implementation on `CORS`.
    async fn handle<'r>(&self, _request: &'r Request<'_>, _data: Data<'r>) -> Outcome<'r> {
        let empty = Response::build().finalize();
        Outcome::Success(empty)
    }
}

View File

@ -37,6 +37,9 @@ pub trait VmHostClient: Send + Sync {
/// Spawn a VM
async fn create_vm(&self, cfg: &FullVmInfo) -> Result<()>;
/// Re-install a vm OS
async fn reinstall_vm(&self, cfg: &FullVmInfo) -> Result<()>;
/// Get the running status of a VM
async fn get_vm_state(&self, vm: &Vm) -> Result<VmState>;

View File

@ -365,6 +365,30 @@ impl ProxmoxClient {
node: node.to_string(),
})
}
/// Unlink (remove) disks from a VM
///
/// Issues a `PUT` to the Proxmox `/nodes/{node}/qemu/{vmid}/unlink`
/// endpoint with the given disk ids; `force` is forwarded as `1`/`0`.
pub async fn unlink_disk(
    &self,
    node: &str,
    vm: ProxmoxVmId,
    disks: Vec<String>,
    force: bool,
) -> Result<()> {
    // Proxmox query strings expect booleans encoded as "1"/"0".
    let force_flag = if force { "1" } else { "0" };
    let endpoint = format!(
        "/api2/json/nodes/{}/qemu/{}/unlink?idlist={}&force={}",
        node,
        vm,
        disks.join(","),
        force_flag
    );
    self.api.req_status(Method::PUT, &endpoint, ()).await?;
    Ok(())
}
}
impl ProxmoxClient {
@ -418,7 +442,7 @@ impl ProxmoxClient {
bios: Some(VmBios::OVMF),
boot: Some("order=scsi0".to_string()),
cores: Some(vm_resources.cpu as i32),
memory: Some((vm_resources.memory / crate::GB).to_string()),
memory: Some((vm_resources.memory / crate::MB).to_string()),
scsi_hw: Some("virtio-scsi-pci".to_string()),
serial_0: Some("socket".to_string()),
scsi_1: Some(format!("{}:cloudinit", &value.disk.name)),
@ -427,7 +451,38 @@ impl ProxmoxClient {
..Default::default()
})
}
/// Import the primary disk (scsi0) from the OS image template and then
/// resize it to the disk size requested in `req`'s resources.
async fn import_template_disk(&self, req: &FullVmInfo) -> Result<()> {
    let vm_id = req.vm.id.into();

    // Write the template image into the VM's primary disk (scsi0).
    let import_request = ImportDiskImageRequest {
        vm_id,
        node: self.node.clone(),
        storage: req.disk.name.clone(),
        disk: "scsi0".to_string(),
        image: req.image.filename()?,
        is_ssd: matches!(req.disk.kind, DiskType::SSD),
    };
    self.import_disk_image(import_request).await?;

    // Grow the disk from the template's size to the requested size and
    // wait for the resize task to complete.
    let resize_task = self
        .resize_disk(ResizeDiskRequest {
            node: self.node.clone(),
            vm_id,
            disk: "scsi0".to_string(),
            size: req.resources()?.disk_size.to_string(),
        })
        .await?;
    // TODO: rollback
    self.wait_for_task(&resize_task).await?;
    Ok(())
}
}
#[async_trait]
impl VmHostClient for ProxmoxClient {
async fn download_os_image(&self, image: &VmOsImage) -> Result<()> {
@ -499,28 +554,35 @@ impl VmHostClient for ProxmoxClient {
.await?;
self.wait_for_task(&t_create).await?;
// import primary disk from image (scsi0)
self.import_disk_image(ImportDiskImageRequest {
vm_id,
node: self.node.clone(),
storage: req.disk.name.clone(),
disk: "scsi0".to_string(),
image: req.image.filename()?,
is_ssd: matches!(req.disk.kind, DiskType::SSD),
})
.await?;
// import template image
self.import_template_disk(&req).await?;
// resize disk to match template
let j_resize = self
.resize_disk(ResizeDiskRequest {
node: self.node.clone(),
vm_id,
disk: "scsi0".to_string(),
size: req.resources()?.disk_size.to_string(),
})
// try to start, otherwise ignore the error (maybe it's already running)
if let Ok(j_start) = self.start_vm(&self.node, vm_id).await {
if let Err(e) = self.wait_for_task(&j_start).await {
warn!("Failed to start vm: {}", e);
}
}
Ok(())
}
async fn reinstall_vm(&self, req: &FullVmInfo) -> Result<()> {
let vm_id = req.vm.id.into();
// try to stop, otherwise ignore the error (maybe it's already stopped)
if let Ok(j_stop) = self.stop_vm(&self.node, vm_id).await {
if let Err(e) = self.wait_for_task(&j_stop).await {
warn!("Failed to stop vm: {}", e);
}
}
// unlink the existing main disk
self.unlink_disk(&self.node, vm_id, vec!["scsi0".to_string()], true)
.await?;
// TODO: rollback
self.wait_for_task(&j_resize).await?;
// import disk from template again
self.import_template_disk(&req).await?;
// try to start, otherwise ignore the error (maybe it's already running)
if let Ok(j_start) = self.start_vm(&self.node, vm_id).await {
@ -1092,7 +1154,10 @@ mod tests {
assert_eq!(vm.cores, Some(template.cpu as i32));
assert_eq!(vm.memory, Some((template.memory / MB).to_string()));
assert_eq!(vm.on_boot, Some(true));
assert_eq!(vm.ip_config, Some("ip=192.168.1.2/16,gw=192.168.1.1,ip6=auto".to_string()));
assert_eq!(
vm.ip_config,
Some("ip=192.168.1.2/16,gw=192.168.1.1,ip6=auto".to_string())
);
Ok(())
}
}