refactor: move spawn_vm out of worker into provisioner

feat: spawn vm params to settings
2024-11-27 14:38:23 +00:00
parent 088f22cea4
commit d28ca286fb
15 changed files with 491 additions and 202 deletions


@@ -1,14 +1,10 @@
-use crate::exchange::ExchangeRateCache;
-use crate::host::proxmox::{CreateVm, ProxmoxClient, VmBios, VmStatus};
-use crate::provisioner::lnvps::LNVpsProvisioner;
+use crate::host::proxmox::{ProxmoxClient, VmStatus};
 use crate::provisioner::Provisioner;
 use crate::status::{VmRunningState, VmState, VmStateCache};
-use anyhow::{bail, Result};
+use anyhow::Result;
 use chrono::Utc;
-use fedimint_tonic_lnd::Client;
-use ipnetwork::IpNetwork;
-use lnvps_db::{LNVpsDb, Vm, VmHost};
-use log::{error, info, warn};
+use lnvps_db::LNVpsDb;
+use log::{debug, error, info, warn};
 use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
 
 pub enum WorkJob {
@@ -21,9 +17,7 @@ pub enum WorkJob {
 }
 
 pub struct Worker {
-    read_only: bool,
     db: Box<dyn LNVpsDb>,
-    lnd: Client,
     provisioner: Box<dyn Provisioner>,
     vm_state_cache: VmStateCache,
     tx: UnboundedSender<WorkJob>,
@@ -31,21 +25,16 @@ pub struct Worker {
 }
 
 impl Worker {
-    pub fn new<D: LNVpsDb + Clone + 'static>(
-        read_only: bool,
+    pub fn new<D: LNVpsDb + Clone + 'static, P: Provisioner + 'static>(
         db: D,
-        lnd: Client,
+        provisioner: P,
         vm_state_cache: VmStateCache,
-        rates: ExchangeRateCache,
     ) -> Self {
         let (tx, rx) = unbounded_channel();
-        let p = LNVpsProvisioner::new(db.clone(), lnd.clone(), rates);
         Self {
-            read_only,
             db: Box::new(db),
-            provisioner: Box::new(p),
+            provisioner: Box::new(provisioner),
             vm_state_cache,
-            lnd,
             tx,
             rx,
         }
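
Note: provisioner construction now happens at the call site instead of inside Worker::new. A minimal sketch of the new wiring, assuming db, lnd, rates, and vm_state_cache are in scope there (the actual call site is in another file of this commit and not shown here):

    // Hypothetical call site: build the provisioner once, then inject it.
    // The worker no longer holds `lnd` or `rates` itself.
    let provisioner = LNVpsProvisioner::new(db.clone(), lnd.clone(), rates);
    let worker = Worker::new(db, provisioner, vm_state_cache);
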
@@ -55,74 +44,15 @@ impl Worker {
         self.tx.clone()
     }
 
-    /// Spawn a VM on the host
-    async fn spawn_vm(&self, vm: &Vm, vm_host: &VmHost, client: &ProxmoxClient) -> Result<()> {
-        if self.read_only {
-            bail!("Cant spawn VM's in read-only mode");
-        }
-
-        let mut ips = self.db.list_vm_ip_assignments(vm.id).await?;
-        if ips.is_empty() {
-            ips = self.provisioner.allocate_ips(vm.id).await?;
-        }
-
-        let ip_config = ips
-            .iter()
-            .map_while(|ip| {
-                if let Ok(net) = ip.ip.parse::<IpNetwork>() {
-                    Some(match net {
-                        IpNetwork::V4(addr) => format!("ip={}", addr),
-                        IpNetwork::V6(addr) => format!("ip6={}", addr),
-                    })
-                } else {
-                    None
-                }
-            })
-            .collect::<Vec<_>>()
-            .join(",");
-
-        let drives = self.db.list_host_disks(vm.host_id).await?;
-        let drive = if let Some(d) = drives.iter().find(|d| d.enabled) {
-            d
-        } else {
-            bail!("No host drive found!")
-        };
-
-        let ssh_key = self.db.get_user_ssh_key(vm.ssh_key_id).await?;
-
-        client
-            .create_vm(CreateVm {
-                node: vm_host.name.clone(),
-                vm_id: (vm.id + 100) as i32,
-                bios: Some(VmBios::OVMF),
-                boot: Some("order=scsi0".to_string()),
-                cores: Some(vm.cpu as i32),
-                cpu: Some("kvm64".to_string()),
-                ip_config: Some(ip_config),
-                machine: Some("q35".to_string()),
-                memory: Some((vm.memory / 1024 / 1024).to_string()),
-                net: Some("virtio,bridge=vmbr0,tag=100".to_string()),
-                os_type: Some("l26".to_string()),
-                scsi_1: Some(format!("{}:cloudinit", &drive.name)),
-                scsi_hw: Some("virtio-scsi-pci".to_string()),
-                ssh_keys: Some(urlencoding::encode(&ssh_key.key_data).to_string()),
-                efi_disk_0: Some(format!("{}:0,efitype=4m", &drive.name)),
-                ..Default::default()
-            })
-            .await?;
-
-        Ok(())
-    }
-
     /// Check a VM's status
     async fn check_vm(&self, vm_id: u64) -> Result<()> {
-        info!("Checking VM {}", vm_id);
+        debug!("Checking VM: {}", vm_id);
         let vm = self.db.get_vm(vm_id).await?;
         let host = self.db.get_host(vm.host_id).await?;
         let client = ProxmoxClient::new(host.ip.parse()?).with_api_token(&host.api_token);
         match client.get_vm_status(&host.name, (vm.id + 100) as i32).await {
             Ok(s) => {
                 info!("VM {} status: {:?}", vm_id, s.status);
                 let state = VmState {
                     state: match s.status {
                         VmStatus::Stopped => VmRunningState::Stopped,
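
Note: the worker now reaches spawn_vm only through the Provisioner trait; judging by the call in the Err branch below, the method takes just the VM id. An assumed, abridged sketch of the trait as the worker sees it (the real definition, with allocate_ips and the rest, lives in the provisioner module; the async_trait attribute and the Send + Sync bound are assumptions for a boxed async trait object):

    use anyhow::Result;
    use async_trait::async_trait;

    #[async_trait]
    pub trait Provisioner: Send + Sync {
        /// The IP allocation, drive selection, and Proxmox create_vm call
        /// removed above now live behind this method.
        async fn spawn_vm(&self, vm_id: u64) -> Result<()>;
    }

Both create_vm and get_vm_status derive the Proxmox VMID as (vm.id + 100) as i32: Proxmox reserves VMIDs below 100, so database id 1 maps to VMID 101.
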
@@ -139,9 +69,9 @@ impl Worker {
                 self.vm_state_cache.set_state(vm_id, state).await?;
             }
             Err(e) => {
-                warn!("Failed to get VM status: {}", e);
+                warn!("Failed to get VM {} status: {}", vm.id, e);
                 if vm.expires > Utc::now() {
-                    self.spawn_vm(&vm, &host, &client).await?;
+                    self.provisioner.spawn_vm(vm.id).await?;
                 }
             }
         }
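
Note: with both the read_only field and its bail! guard gone from the worker, the check presumably moves into LNVpsProvisioner together with spawn_vm. A hedged stand-in showing where that guard plausibly lands; the struct name and field placement here are illustrative, not the commit's actual code:

    use anyhow::{bail, Result};

    // Illustrative stand-in for the provisioner-side guard.
    struct ProvisionerSketch {
        read_only: bool,
    }

    impl ProvisionerSketch {
        async fn spawn_vm(&self, _vm_id: u64) -> Result<()> {
            if self.read_only {
                bail!("Can't spawn VMs in read-only mode");
            }
            // ...allocate IPs if none are assigned, pick an enabled host
            // drive, then issue the Proxmox create_vm call as before.
            Ok(())
        }
    }
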