diff --git a/src/api/model.rs b/src/api/model.rs
index e70a46f..6fd2f94 100644
--- a/src/api/model.rs
+++ b/src/api/model.rs
@@ -207,7 +207,7 @@ pub struct CreateVmRequest {
     pub template_id: u64,
     pub image_id: u64,
     pub ssh_key_id: u64,
-    pub ref_code: Option<String>
+    pub ref_code: Option<String>,
 }

 #[derive(Serialize, Deserialize, JsonSchema)]
diff --git a/src/api/routes.rs b/src/api/routes.rs
index cda130e..9c3be40 100644
--- a/src/api/routes.rs
+++ b/src/api/routes.rs
@@ -2,7 +2,7 @@ use crate::api::model::{
     AccountPatchRequest, ApiUserSshKey, ApiVmIpAssignment, ApiVmOsImage, ApiVmPayment, ApiVmStatus,
     ApiVmTemplate, CreateSshKey, CreateVmRequest, VMPatchRequest,
 };
-use crate::host::{get_host_client, FullVmInfo};
+use crate::host::{get_host_client, FullVmInfo, TimeSeries, TimeSeriesData};
 use crate::nip98::Nip98Auth;
 use crate::provisioner::{HostCapacityService, LNVpsProvisioner};
 use crate::settings::Settings;
@@ -42,7 +42,8 @@ pub fn routes() -> Vec<Route> {
         v1_start_vm,
         v1_stop_vm,
         v1_restart_vm,
-        v1_patch_vm
+        v1_patch_vm,
+        v1_time_series
     ]
 }

@@ -229,7 +230,7 @@ async fn v1_patch_vm(

     if let Some(ptr) = &data.reverse_dns {
         let mut ips = db.list_vm_ip_assignments(vm.id).await?;
-        for mut ip in ips.iter_mut() {
+        for ip in ips.iter_mut() {
             ip.dns_reverse = Some(ptr.to_string());
             provisioner.update_reverse_ip_dns(ip).await?;
             db.update_vm_ip_assignment(ip).await?;
@@ -473,6 +474,26 @@ async fn v1_restart_vm(
     ApiData::ok(())
 }

+#[openapi(tag = "VM")]
+#[get("/api/v1/vm/<id>/time-series")]
+async fn v1_time_series(
+    auth: Nip98Auth,
+    db: &State<Arc<dyn LNVpsDb>>,
+    settings: &State<Settings>,
+    id: u64,
+) -> ApiResult<Vec<TimeSeriesData>> {
+    let pubkey = auth.event.pubkey.to_bytes();
+    let uid = db.upsert_user(&pubkey).await?;
+    let vm = db.get_vm(id).await?;
+    if uid != vm.user_id {
+        return ApiData::err("VM does not belong to you");
+    }
+
+    let host = db.get_host(vm.host_id).await?;
+    let client = get_host_client(&host, &settings.provisioner)?;
+    ApiData::ok(client.get_time_series_data(&vm, TimeSeries::Hourly).await?)
+}
+
 /// Get payment status (for polling)
 #[openapi(tag = "Payment")]
 #[get("/api/v1/payment/<id>")]
diff --git a/src/dns/cloudflare.rs b/src/dns/cloudflare.rs
index a0d9ab1..4f13c37 100644
--- a/src/dns/cloudflare.rs
+++ b/src/dns/cloudflare.rs
@@ -27,7 +27,11 @@ impl Cloudflare {
                 "Error updating record: {:?}",
                 rsp.errors
                     .as_ref()
-                    .map(|e| e.iter().map(|i| i.message.clone()).collect::<Vec<_>>().join(", "))
+                    .map(|e| e
+                        .iter()
+                        .map(|i| i.message.clone())
+                        .collect::<Vec<_>>()
+                        .join(", "))
                     .unwrap_or_default()
             );
         }
diff --git a/src/host/libvirt.rs b/src/host/libvirt.rs
index d85229a..32d54fb 100644
--- a/src/host/libvirt.rs
+++ b/src/host/libvirt.rs
@@ -1,4 +1,4 @@
-use crate::host::{FullVmInfo, VmHostClient};
+use crate::host::{FullVmInfo, TimeSeries, TimeSeriesData, VmHostClient};
 use crate::status::VmState;
 use lnvps_db::{async_trait, Vm, VmOsImage};

@@ -37,4 +37,12 @@ impl VmHostClient for LibVirt {
     async fn configure_vm(&self, vm: &Vm) -> anyhow::Result<()> {
         todo!()
     }
+
+    async fn get_time_series_data(
+        &self,
+        vm: &Vm,
+        series: TimeSeries,
+    ) -> anyhow::Result<Vec<TimeSeriesData>> {
+        todo!()
+    }
 }
diff --git a/src/host/mod.rs b/src/host/mod.rs
index 8e0e8e4..06a5de1 100644
--- a/src/host/mod.rs
+++ b/src/host/mod.rs
@@ -6,6 +6,8 @@ use lnvps_db::{
     async_trait, IpRange, LNVpsDb, UserSshKey, Vm, VmHost, VmHostDisk, VmHostKind, VmIpAssignment,
     VmOsImage, VmTemplate,
 };
+use schemars::JsonSchema;
+use serde::{Deserialize, Serialize};
 use std::collections::HashSet;
 use std::sync::Arc;

@@ -40,6 +42,13 @@ pub trait VmHostClient: Send + Sync {

     /// Apply vm configuration (patch)
     async fn configure_vm(&self, cfg: &FullVmInfo) -> Result<()>;
+
+    /// Get resource usage data
+    async fn get_time_series_data(
+        &self,
+        vm: &Vm,
+        series: TimeSeries,
+    ) -> Result<Vec<TimeSeriesData>>;
 }

 pub fn get_host_client(host: &VmHost, cfg: &ProvisionerConfig) -> Result<Arc<dyn VmHostClient>> {
@@ -118,3 +127,24 @@ impl FullVmInfo {
         })
     }
 }
+
+#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
+pub struct TimeSeriesData {
+    pub timestamp: u64,
+    pub cpu: f32,
+    pub memory: f32,
+    pub memory_size: u64,
+    pub net_in: f32,
+    pub net_out: f32,
+    pub disk_write: f32,
+    pub disk_read: f32,
+}
+
+#[derive(Debug, Clone)]
+pub enum TimeSeries {
+    Hourly,
+    Daily,
+    Weekly,
+    Monthly,
+    Yearly,
+}
diff --git a/src/host/proxmox.rs b/src/host/proxmox.rs
index 8cee4ab..13e4602 100644
--- a/src/host/proxmox.rs
+++ b/src/host/proxmox.rs
@@ -1,4 +1,4 @@
-use crate::host::{FullVmInfo, VmHostClient};
+use crate::host::{FullVmInfo, TimeSeries, TimeSeriesData, VmHostClient};
 use crate::json_api::JsonApi;
 use crate::settings::{QemuConfig, SshConfig};
 use crate::ssh_client::SshClient;
@@ -164,6 +164,22 @@ impl ProxmoxClient {
         }
     }

+    pub async fn get_vm_rrd_data(
+        &self,
+        id: ProxmoxVmId,
+        timeframe: &str,
+    ) -> Result<Vec<RrdDataPoint>> {
+        let data: ResponseBase<Vec<RrdDataPoint>> = self
+            .api
+            .get(&format!(
+                "/api2/json/nodes/{}/qemu/{}/rrddata?timeframe={}",
+                &self.node, id, timeframe
+            ))
+            .await?;
+
+        Ok(data.data)
+    }
+
     /// Get the current status of a running task
     ///
     /// https://pve.proxmox.com/pve-docs/api-viewer/?ref=public_apis#/nodes/{node}/tasks/{upid}/status
@@ -480,16 +496,15 @@ impl VmHostClient for ProxmoxClient {
         self.wait_for_task(&t_create).await?;

         // import primary disk from image (scsi0)
-        self
-            .import_disk_image(ImportDiskImageRequest {
-                vm_id,
-                node: self.node.clone(),
-                storage: req.disk.name.clone(),
-                disk: "scsi0".to_string(),
-                image: req.image.filename()?,
-                is_ssd: matches!(req.disk.kind, DiskType::SSD),
-            })
-            .await?;
+        self.import_disk_image(ImportDiskImageRequest {
+            vm_id,
+            node: self.node.clone(),
+            storage: req.disk.name.clone(),
+            disk: "scsi0".to_string(),
+            image: req.image.filename()?,
+            is_ssd: matches!(req.disk.kind, DiskType::SSD),
+        })
+        .await?;

         // resize disk to match template
         let j_resize = self
@@ -549,6 +564,26 @@ impl VmHostClient for ProxmoxClient {
             .await?;
         Ok(())
     }
+
+    async fn get_time_series_data(
+        &self,
+        vm: &Vm,
+        series: TimeSeries,
+    ) -> Result<Vec<TimeSeriesData>> {
+        let r = self
+            .get_vm_rrd_data(
+                vm.id.into(),
+                match series {
+                    TimeSeries::Hourly => "hour",
+                    TimeSeries::Daily => "day",
+                    TimeSeries::Weekly => "week",
+                    TimeSeries::Monthly => "month",
+                    TimeSeries::Yearly => "year",
+                },
+            )
+            .await?;
+        Ok(r.into_iter().map(TimeSeriesData::from).collect())
+    }
 }

 /// Wrap a database vm id
@@ -901,3 +936,43 @@ pub struct VmConfig {
     #[serde(skip_serializing_if = "Option::is_none")]
     pub serial_0: Option<String>,
 }
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct RrdDataPoint {
+    pub time: u64,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub cpu: Option<f32>,
+    #[serde(rename = "mem")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub memory: Option<f32>,
+    #[serde(rename = "maxmem")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub memory_size: Option<u64>,
+    #[serde(rename = "netin")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub net_in: Option<f32>,
+    #[serde(rename = "netout")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub net_out: Option<f32>,
+    #[serde(rename = "diskwrite")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub disk_write: Option<f32>,
+    #[serde(rename = "diskread")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub disk_read: Option<f32>,
+}
+
+impl From<RrdDataPoint> for TimeSeriesData {
+    fn from(value: RrdDataPoint) -> Self {
+        Self {
+            timestamp: value.time,
+            cpu: value.cpu.unwrap_or(0.0),
+            memory: value.memory.unwrap_or(0.0),
+            memory_size: value.memory_size.unwrap_or(0),
+            net_in: value.net_in.unwrap_or(0.0),
+            net_out: value.net_out.unwrap_or(0.0),
+            disk_write: value.disk_write.unwrap_or(0.0),
+            disk_read: value.disk_read.unwrap_or(0.0),
+        }
+    }
+}
diff --git a/src/provisioner/capacity.rs b/src/provisioner/capacity.rs
index 6240179..902e1da 100644
--- a/src/provisioner/capacity.rs
+++ b/src/provisioner/capacity.rs
@@ -80,7 +80,8 @@ impl HostCapacityService {
             .filter_map(|v| {
                 templates
                     .iter()
-                    .find(|t| t.id == v.template_id).map(|t| (v.id, t))
+                    .find(|t| t.id == v.template_id)
+                    .map(|t| (v.id, t))
             })
             .collect();