From b190fcdd1c983fdad078bee2360f7457aa54ec2c Mon Sep 17 00:00:00 2001
From: Kieran
Date: Thu, 20 Mar 2025 12:30:34 +0000
Subject: [PATCH] feat: re-install vm

closes #10
---
 src/api/routes.rs   |  27 +++++++++++
 src/bin/api.rs      |  11 ++++-
 src/cors.rs         |  22 +++++----
 src/host/mod.rs     |   3 ++
 src/host/proxmox.rs | 109 +++++++++++++++++++++++++++++++++++---------
 5 files changed, 139 insertions(+), 33 deletions(-)

diff --git a/src/api/routes.rs b/src/api/routes.rs
index 940bd19..d58bff2 100644
--- a/src/api/routes.rs
+++ b/src/api/routes.rs
@@ -48,6 +48,7 @@ pub fn routes() -> Vec<Route> {
         v1_start_vm,
         v1_stop_vm,
         v1_restart_vm,
+        v1_reinstall_vm,
         v1_patch_vm,
         v1_time_series,
         v1_custom_template_calc,
@@ -592,6 +593,32 @@ async fn v1_restart_vm(
     ApiData::ok(())
 }
 
+/// Re-install a VM
+#[openapi(tag = "VM")]
+#[patch("/api/v1/vm/<id>/re-install")]
+async fn v1_reinstall_vm(
+    auth: Nip98Auth,
+    db: &State<Arc<dyn LNVpsDb>>,
+    settings: &State<Settings>,
+    worker: &State<UnboundedSender<WorkJob>>,
+    id: u64,
+) -> ApiResult<()> {
+    let pubkey = auth.event.pubkey.to_bytes();
+    let uid = db.upsert_user(&pubkey).await?;
+    let vm = db.get_vm(id).await?;
+    if uid != vm.user_id {
+        return ApiData::err("VM does not belong to you");
+    }
+
+    let host = db.get_host(vm.host_id).await?;
+    let client = get_host_client(&host, &settings.provisioner)?;
+    let info = FullVmInfo::load(vm.id, (*db).clone()).await?;
+    client.reinstall_vm(&info).await?;
+
+    worker.send(WorkJob::CheckVm { vm_id: id })?;
+    ApiData::ok(())
+}
+
 #[openapi(tag = "VM")]
 #[get("/api/v1/vm/<id>/time-series")]
 async fn v1_time_series(
diff --git a/src/bin/api.rs b/src/bin/api.rs
index 9bcaba8..56f920d 100644
--- a/src/bin/api.rs
+++ b/src/bin/api.rs
@@ -20,6 +20,7 @@ use std::net::{IpAddr, SocketAddr};
 use std::path::PathBuf;
 use std::sync::Arc;
 use std::time::Duration;
+use rocket::http::Method;
 
 #[derive(Parser)]
 #[clap(about, version, author)]
@@ -159,7 +160,6 @@ async fn main() -> Result<(), Error> {
     config.port = ip.port();
 
     if let Err(e) = rocket::Rocket::custom(config)
-        .attach(CORS)
        .manage(db.clone())
        .manage(provisioner.clone())
        .manage(status.clone())
@@ -174,6 +174,15 @@ async fn main() -> Result<(), Error> {
                 ..Default::default()
             }),
         )
+        .attach(CORS)
+        .mount("/", vec![
+            rocket::Route::ranked(
+                isize::MAX,
+                Method::Options,
+                "/<path..>",
+                CORS,
+            )
+        ])
         .launch()
         .await
     {
diff --git a/src/cors.rs b/src/cors.rs
index 1348c28..65b2a29 100644
--- a/src/cors.rs
+++ b/src/cors.rs
@@ -1,8 +1,9 @@
 use rocket::fairing::{Fairing, Info, Kind};
-use rocket::http::{Header, Method, Status};
-use rocket::{Request, Response};
-use std::io::Cursor;
+use rocket::http::Header;
+use rocket::route::{Handler, Outcome};
+use rocket::{Data, Request, Response};
 
+#[derive(Clone)]
 pub struct CORS;
 
 #[rocket::async_trait]
@@ -14,7 +15,7 @@ impl Fairing for CORS {
         }
     }
 
-    async fn on_response<'r>(&self, req: &'r Request<'_>, response: &mut Response<'r>) {
+    async fn on_response<'r>(&self, _req: &'r Request<'_>, response: &mut Response<'r>) {
         response.set_header(Header::new("Access-Control-Allow-Origin", "*"));
         response.set_header(Header::new(
             "Access-Control-Allow-Methods",
@@ -22,11 +23,12 @@ impl Fairing for CORS {
         ));
         response.set_header(Header::new("Access-Control-Allow-Headers", "*"));
         response.set_header(Header::new("Access-Control-Allow-Credentials", "true"));
-
-        // force status 200 for options requests
-        if req.method() == Method::Options {
-            response.set_status(Status::Ok);
-            response.set_sized_body(None, Cursor::new(""))
-        }
+    }
+}
+
+#[rocket::async_trait]
+impl Handler for CORS {
+    async fn handle<'r>(&self, _request: &'r Request<'_>, _data: Data<'r>) -> Outcome<'r> {
+        Outcome::Success(Response::new())
     }
 }
diff --git a/src/host/mod.rs b/src/host/mod.rs
index 3918532..3aed847 100644
--- a/src/host/mod.rs
+++ b/src/host/mod.rs
@@ -37,6 +37,9 @@ pub trait VmHostClient: Send + Sync {
     /// Spawn a VM
     async fn create_vm(&self, cfg: &FullVmInfo) -> Result<()>;
 
+    /// Re-install a vm OS
+    async fn reinstall_vm(&self, cfg: &FullVmInfo) -> Result<()>;
+
     /// Get the running status of a VM
     async fn get_vm_state(&self, vm: &Vm) -> Result<VmState>;
 
diff --git a/src/host/proxmox.rs b/src/host/proxmox.rs
index 7e28e29..6d41aa9 100644
--- a/src/host/proxmox.rs
+++ b/src/host/proxmox.rs
@@ -365,6 +365,30 @@ impl ProxmoxClient {
             node: node.to_string(),
         })
     }
+
+    /// Delete disks from VM
+    pub async fn unlink_disk(
+        &self,
+        node: &str,
+        vm: ProxmoxVmId,
+        disks: Vec<String>,
+        force: bool,
+    ) -> Result<()> {
+        self.api
+            .req_status(
+                Method::PUT,
+                &format!(
+                    "/api2/json/nodes/{}/qemu/{}/unlink?idlist={}&force={}",
+                    node,
+                    vm,
+                    disks.join(","),
+                    if force { "1" } else { "0" }
+                ),
+                (),
+            )
+            .await?;
+        Ok(())
+    }
 }
 
 impl ProxmoxClient {
@@ -418,7 +442,7 @@ impl ProxmoxClient {
             bios: Some(VmBios::OVMF),
             boot: Some("order=scsi0".to_string()),
             cores: Some(vm_resources.cpu as i32),
-            memory: Some((vm_resources.memory / crate::GB).to_string()),
+            memory: Some((vm_resources.memory / crate::MB).to_string()),
             scsi_hw: Some("virtio-scsi-pci".to_string()),
             serial_0: Some("socket".to_string()),
             scsi_1: Some(format!("{}:cloudinit", &value.disk.name)),
@@ -427,7 +451,38 @@ impl ProxmoxClient {
             ..Default::default()
         })
     }
+
+    /// Import main disk image from the template
+    async fn import_template_disk(&self, req: &FullVmInfo) -> Result<()> {
+        let vm_id = req.vm.id.into();
+
+        // import primary disk from image (scsi0)
+        self.import_disk_image(ImportDiskImageRequest {
+            vm_id,
+            node: self.node.clone(),
+            storage: req.disk.name.clone(),
+            disk: "scsi0".to_string(),
+            image: req.image.filename()?,
+            is_ssd: matches!(req.disk.kind, DiskType::SSD),
+        })
+        .await?;
+
+        // resize disk to match template
+        let j_resize = self
+            .resize_disk(ResizeDiskRequest {
+                node: self.node.clone(),
+                vm_id,
+                disk: "scsi0".to_string(),
+                size: req.resources()?.disk_size.to_string(),
+            })
+            .await?;
+        // TODO: rollback
+        self.wait_for_task(&j_resize).await?;
+
+        Ok(())
+    }
 }
+
 #[async_trait]
 impl VmHostClient for ProxmoxClient {
     async fn download_os_image(&self, image: &VmOsImage) -> Result<()> {
@@ -499,28 +554,35 @@ impl VmHostClient for ProxmoxClient {
             .await?;
         self.wait_for_task(&t_create).await?;
 
-        // import primary disk from image (scsi0)
-        self.import_disk_image(ImportDiskImageRequest {
-            vm_id,
-            node: self.node.clone(),
-            storage: req.disk.name.clone(),
-            disk: "scsi0".to_string(),
-            image: req.image.filename()?,
-            is_ssd: matches!(req.disk.kind, DiskType::SSD),
-        })
-        .await?;
+        // import template image
+        self.import_template_disk(&req).await?;
 
-        // resize disk to match template
-        let j_resize = self
-            .resize_disk(ResizeDiskRequest {
-                node: self.node.clone(),
-                vm_id,
-                disk: "scsi0".to_string(),
-                size: req.resources()?.disk_size.to_string(),
-            })
+        // try start, otherwise ignore error (maybe its already running)
+        if let Ok(j_start) = self.start_vm(&self.node, vm_id).await {
+            if let Err(e) = self.wait_for_task(&j_start).await {
+                warn!("Failed to start vm: {}", e);
+            }
+        }
+
+        Ok(())
+    }
+
+    async fn reinstall_vm(&self, req: &FullVmInfo) -> Result<()> {
+        let vm_id = req.vm.id.into();
+
+        // try stop, otherwise ignore error (maybe its already running)
+        if let Ok(j_stop) = self.stop_vm(&self.node, vm_id).await {
+            if let Err(e) = self.wait_for_task(&j_stop).await {
+                warn!("Failed to stop vm: {}", e);
+            }
+        }
+
+        // unlink the existing main disk
+        self.unlink_disk(&self.node, vm_id, vec!["scsi0".to_string()], true)
             .await?;
-        // TODO: rollback
-        self.wait_for_task(&j_resize).await?;
+
+        // import disk from template again
+        self.import_template_disk(&req).await?;
 
         // try start, otherwise ignore error (maybe its already running)
         if let Ok(j_start) = self.start_vm(&self.node, vm_id).await {
@@ -1092,7 +1154,10 @@ mod tests {
         assert_eq!(vm.cores, Some(template.cpu as i32));
         assert_eq!(vm.memory, Some((template.memory / MB).to_string()));
         assert_eq!(vm.on_boot, Some(true));
-        assert_eq!(vm.ip_config, Some("ip=192.168.1.2/16,gw=192.168.1.1,ip6=auto".to_string()));
+        assert_eq!(
+            vm.ip_config,
+            Some("ip=192.168.1.2/16,gw=192.168.1.1,ip6=auto".to_string())
+        );
         Ok(())
     }
 }