Compare commits
67 Commits
e463e06a35
1dda3a561d
179d70edb0
c859c153c1
cd7ac9e967
f6a756db78
7e10e0dd6e
4db6aa1897
4dfc33bca2
c432f603ec
a4850b4e06
9296e571ec
2ae158c31a
b7d7027eec
b9bec36843
97d631ce5d
36069bb6a7
6a8f1826bb
7deed82a7c
6ca8283040
b8ab61c48f
70a4d9c638
d316ccacd7
396cb8a7ef
8068b7d5bc
e2d6d84439
ea6499558d
1bb03762bd
603099e947
d18f32e897
f4b8f88772
c4373b78d0
39ca5ee8b4
cd7c7cd7be
c570222e8a
32fc16dca2
a57c85fa2c
4bf8b06337
2505082a59
ec7fa92010
af36d4e586
9106221204
0b51a5ecee
6850b786cf
cb71ba8bc6
b6356636de
9fb4a38e72
cbafca8da7
9ee4232706
39622315be
b190fcdd1c
6b12a9bddb
6de4471861
3527742992
be4a981bea
f934bb3132
6c7ae6ac89
5c57abb9c1
2d55392050
02d606d60c
029f2cb6e1
45dd0c4398
1c282e460f
a2e08c5965
b9f21c09bd
d94ca9e1bb
9606b91e6d
Cargo.lock (generated, 1022 lines changed)
File diff suppressed because it is too large.
Cargo.toml (65 lines changed)
@@ -1,60 +1,19 @@
[package]
name = "lnvps"
version = "0.1.0"
edition = "2021"
[workspace]
resolver = "3"
members = [
    "lnvps_db",
    "lnvps_api",
    "lnvps_nostr",
    "lnvps_common"
]

[[bin]]
name = "api"

[features]
default = ["mikrotik", "nostr-dm", "proxmox", "lnd", "cloudflare"]
mikrotik = ["dep:reqwest"]
nostr-dm = ["dep:nostr-sdk"]
proxmox = ["dep:reqwest", "dep:ssh2", "dep:tokio-tungstenite"]
libvirt = ["dep:virt"]
lnd = ["dep:fedimint-tonic-lnd"]
bitvora = ["dep:reqwest", "dep:tokio-stream"]
cloudflare = ["dep:reqwest"]

[dependencies]
lnvps_db = { path = "lnvps_db" }
tokio = { version = "1.37.0", features = ["rt", "rt-multi-thread", "macros", "sync"] }
[workspace.dependencies]
tokio = { version = "1.37.0", features = ["rt", "rt-multi-thread", "macros"] }
anyhow = "1.0.83"
config = { version = "0.15.8", features = ["yaml"] }
log = "0.4.21"
fern = "0.7.1"
env_logger = "0.11.7"
serde = { version = "1.0.213", features = ["derive"] }
serde_json = "1.0.132"
rocket = { version = "0.5.1", features = ["json"] }
rocket_okapi = { version = "0.9.0", features = ["swagger"] }
schemars = { version = "0.8.22", features = ["chrono"] }
chrono = { version = "0.4.38", features = ["serde"] }
base64 = { version = "0.22.1", features = ["alloc"] }
urlencoding = "2.1.3"
ipnetwork = { git = "https://git.v0l.io/Kieran/ipnetwork.git", rev = "35977adc8103cfc232bc95fbc32f4e34f2b6a6d7" }
rand = "0.9.0"
clap = { version = "4.5.21", features = ["derive"] }
ssh-key = "0.6.7"
lettre = { version = "0.11.10", features = ["tokio1-native-tls"] }
ws = { package = "rocket_ws", version = "0.1.0" }
native-tls = "0.2.12"
config = { version = "0.15.8", features = ["yaml"] }
hex = "0.4.3"
futures = "0.3.31"

#nostr-dm
nostr = { version = "0.39.0", default-features = false, features = ["std"] }
nostr-sdk = { version = "0.39.0", optional = true, default-features = false, features = ["nip44", "nip59"] }

#proxmox
tokio-tungstenite = { version = "^0.21", features = ["native-tls"], optional = true }
ssh2 = { version = "0.9.4", optional = true }
reqwest = { version = "0.12.8", optional = true }

#libvirt
virt = { version = "0.4.2", optional = true }

#lnd
fedimint-tonic-lnd = { version = "0.2.0", default-features = false, features = ["invoicesrpc"], optional = true }

#bitvora
tokio-stream = { version = "0.1.17", features = ["sync"], optional = true }
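The `[features]` table above maps optional Cargo features to optional dependencies (`dep:reqwest`, `dep:nostr-sdk`, and so on). A minimal sketch of how such a feature gate is typically consumed in the code, in the same way this repo gates its `nostr-domain` module later in the diff; the module and function names here are illustrative, not taken from the codebase:

```rust
// Compiled only when built with `--features nostr-dm` (illustrative module name).
#[cfg(feature = "nostr-dm")]
mod nostr_dm {
    pub fn send_dm(msg: &str) {
        // Hypothetical stand-in for a nostr-sdk based sender.
        println!("sending DM: {msg}");
    }
}

pub fn notify(msg: &str) {
    // The call site is gated too, so builds without the feature still compile.
    #[cfg(feature = "nostr-dm")]
    nostr_dm::send_dm(msg);

    #[cfg(not(feature = "nostr-dm"))]
    let _ = msg; // feature disabled: nothing to do
}
```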
Dockerfile
@@ -3,10 +3,12 @@ ARG IMAGE=rust:bookworm
FROM $IMAGE AS build
WORKDIR /app/src
COPY . .
RUN apt update && apt -y install protobuf-compiler
RUN cargo test && cargo install --path . --root /app/build
RUN apt update && apt -y install protobuf-compiler libvirt-dev
RUN cargo test \
    && cargo install --root /app/build --path lnvps_api \
    && cargo install --root /app/build --path lnvps_nostr

FROM $IMAGE AS runner
WORKDIR /app
COPY --from=build /app/build .
ENTRYPOINT ["./bin/api"]
ENTRYPOINT ["./bin/lnvps_api"]
README.md (92 lines changed)
@@ -2,13 +2,23 @@

A bitcoin powered VPS system.

## Requirements
## Features

- MySql database
- Lightning node:
  - LND
  - [Bitvora](https://bitvora.com?r=lnvps)
- Proxmox server
- MySQL database
- Payments:
  - Bitcoin:
    - LND
    - [Bitvora](https://bitvora.com?r=lnvps)
  - Fiat:
    - [RevolutPay](https://www.revolut.com/business/revolut-pay/)
- VM Backend:
  - Proxmox
  - LibVirt (WIP)
- Network Resources:
  - Mikrotik JSON-API
  - OVH API (dedicated server virtual mac)
- DNS Resources:
  - Cloudflare API

## Required Config

@@ -33,25 +43,25 @@ delete-after: 3
read-only: false

# Provisioner is the main process which handles creating/deleting VM's
# Currently supports: Proxmox
provisioner:
  proxmox:
    # Proxmox (QEMU) settings used for spawning VM's
    # QEMU settings used for spawning VM's
    qemu:
      bios: "ovmf"
      machine: "q35"
      os-type: "l26"
      bridge: "vmbr0"
      cpu: "kvm64"
      kvm: false
  libvirt:
    # QEMU settings used for spawning VM's
    qemu:
      bios: "ovmf"
      machine: "q35"
      os-type: "l26"
      bridge: "vmbr0"
      cpu: "kvm64"
      vlan: 100
      kvm: false

# Networking policy
network-policy:
  # Configure network equipment on provisioning IP resources
  access: "auto"
  # Use SLAAC to auto-configure VM ipv6 addresses
  ip6-slaac: true
```

### Email notifications

@@ -89,42 +99,28 @@ nostr:

### Network Setup (Advanced)

When ARP is disabled (reply-only) on your router you may need to create static ARP entries when allocating
IPs, we support managing ARP entries on routers directly as part of the provisioning process.

```yaml
# (Optional)
# When allocating IPs for VM's it may be necessary to create static ARP entries on
# your router, at least one router can be configured
#
# Currently supports: Mikrotik
router:
  mikrotik:
    # !! MAKE SURE TO USE HTTPS !!
    url: "https://my-router.net"
    username: "admin"
    password: "admin"
network-policy:
  # How packets get to the VM
  # (default "auto", nothing to do, packets will always arrive)
  access:
    # Static ARP entries are added to the router for each provisioned IP
    static-arp:
      # Interface where the static ARP entry is added
      interface: "bridge1"
```
**TODO:** AccessPolicy is now managed in the database

### DNS (PTR/A/AAAA)

To create PTR records automatically use the following config:
```yaml
dns:
  cloudflare:
    # The zone containing the reverse domain (eg. X.Y.Z.in-addr.arpa)
    reverse-zone-id: "my-reverse-zone-id"
    # The zone where forward (A/AAAA) entries are added (eg. lnvps.cloud zone)
    # We create forward entries with the format vm-<vmid>.lnvps.cloud
    forward-zone-id: "my-forward-zone-id"
    # API token to add/remove DNS records to this zone
    token: "my-api-token"
    # The zone where forward (A/AAAA) entries are added (eg. lnvps.cloud zone)
    # We create forward entries with the format vm-<vmid>.lnvps.cloud
    forward-zone-id: "my-forward-zone-id"
api:
  cloudflare:
    # API token to add/remove DNS records to this zone
    token: "my-api-token"
```

### Taxes
To charge taxes add the following config, the values are percentage whole numbers:
```yaml
tax-rate:
  IE: 23
  US: 15
```

Taxes are charged based on the users specified country
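The `tax-rate` table above holds whole-number percentages keyed by country code, and payments in this diff carry a separate `tax` field (see `ApiVmPayment` further down). A minimal sketch of how such a rate might be applied, assuming whole-percent rates and amounts in the payment's smallest unit; the function and rate map are illustrative, not the project's actual pricing code:

```rust
use std::collections::HashMap;

/// Illustrative helper: tax due for an amount, given a per-country
/// whole-percent rate table like `tax-rate: { IE: 23, US: 15 }`.
fn tax_for(amount: u64, country: &str, rates: &HashMap<String, u64>) -> u64 {
    match rates.get(country) {
        Some(pct) => amount * pct / 100, // whole-number percentage
        None => 0,                       // no tax configured for this country
    }
}

fn main() {
    let mut rates = HashMap::new();
    rates.insert("IE".to_string(), 23);
    rates.insert("US".to_string(), 15);
    // 100_000 smallest-unit amount billed to an IE user -> 23_000 tax.
    assert_eq!(tax_for(100_000, "IE", &rates), 23_000);
    assert_eq!(tax_for(100_000, "DE", &rates), 0);
}
```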
@@ -2,7 +2,7 @@ volumes:
  db:
services:
  db:
    image: mariadb
    image: docker.io/mariadb
    restart: unless-stopped
    environment:
      - "MARIADB_ROOT_PASSWORD=root"
grafana.json (new file, 861 lines)
@ -0,0 +1,861 @@
|
||||
{
|
||||
"annotations": {
|
||||
"list": [
|
||||
{
|
||||
"builtIn": 1,
|
||||
"datasource": {
|
||||
"type": "grafana",
|
||||
"uid": "-- Grafana --"
|
||||
},
|
||||
"enable": true,
|
||||
"hide": true,
|
||||
"iconColor": "rgba(0, 211, 255, 1)",
|
||||
"name": "Annotations & Alerts",
|
||||
"type": "dashboard"
|
||||
}
|
||||
]
|
||||
},
|
||||
"editable": true,
|
||||
"fiscalYearStartMonth": 0,
|
||||
"graphTooltip": 0,
|
||||
"id": 1,
|
||||
"links": [],
|
||||
"panels": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "mysql",
|
||||
"uid": "cegjfe9u9181sf"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "thresholds"
|
||||
},
|
||||
"custom": {
|
||||
"align": "left",
|
||||
"cellOptions": {
|
||||
"type": "auto"
|
||||
},
|
||||
"filterable": false,
|
||||
"inspect": false
|
||||
},
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "cpu"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 62
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "gb_memory"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 88
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "gb_ssd"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 81
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "gb_hdd"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 75
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "load_factor"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 93
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "sold_cpu"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 87
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "sold_gb_memory"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 133
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "sold_gb_ssd_disk"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 141
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "name"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 205
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "vms"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 63
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 5,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 0
|
||||
},
|
||||
"id": 3,
|
||||
"options": {
|
||||
"cellHeight": "sm",
|
||||
"footer": {
|
||||
"countRows": false,
|
||||
"fields": "",
|
||||
"reducer": [
|
||||
"sum"
|
||||
],
|
||||
"show": false
|
||||
},
|
||||
"showHeader": true,
|
||||
"sortBy": []
|
||||
},
|
||||
"pluginVersion": "11.5.2",
|
||||
"targets": [
|
||||
{
|
||||
"dataset": "lnvps",
|
||||
"editorMode": "code",
|
||||
"format": "table",
|
||||
"rawQuery": true,
|
||||
"rawSql": "SELECT \nh.name,\nh.cpu,\nh.memory / 1024 / 1024 / 1024 gb_memory,\n(select count(*) from vm where vm.host_id = h.id and vm.expires > current_timestamp and vm.deleted = 0) as vms,\n(select sum(size) from vm_host_disk hd where hd.host_id = h.id and hd.enabled = 1 and hd.kind = 1) / 1024 / 1024 / 1024 gb_ssd,\n(select sum(size) from vm_host_disk hd where hd.host_id = h.id and hd.enabled = 1 and hd.kind = 0) / 1024 / 1024 / 1024 gb_hdd,\n(select sum(case when v.template_id is null then (select cpu from vm_custom_template vct where vct.id = v.custom_template_id) else (select cpu from vm_template vt where vt.id = v.template_id) end) from vm v where v.host_id = h.id and expires > current_timestamp()) sold_cpu,\n(select sum(case when v.template_id is null then (select memory from vm_custom_template vct where vct.id = v.custom_template_id) else (select memory from vm_template vt where vt.id = v.template_id) end) from vm v where v.host_id = h.id and expires > current_timestamp()) / 1024 / 1024 / 1024 sold_gb_memory,\n(select sum(case when v.template_id is null then (select disk_size from vm_custom_template vct where vct.id = v.custom_template_id and vct.disk_type = 1) else (select disk_size from vm_template vt where vt.id = v.template_id and vt.disk_type = 1) end) from vm v where v.host_id = h.id and expires > current_timestamp()) / 1024 / 1024 / 1024 sold_gb_ssd_disk,\n(select sum(case when v.template_id is null then (select disk_size from vm_custom_template vct where vct.id = v.custom_template_id and vct.disk_type = 0) else (select disk_size from vm_template vt where vt.id = v.template_id and vt.disk_type = 0) end) from vm v where v.host_id = h.id and expires > current_timestamp()) / 1024 / 1024 / 1024 sold_gb_hdd_disk,\n(select sum(case when v.template_id is null then 0 else (select (case when cp.currency = 'BTC' then (cp.amount/1e9) else cp.amount end) from vm_template vt,vm_cost_plan cp where vt.id = v.template_id and vt.cost_plan_id = cp.id) end) from vm v where v.host_id = h.id and deleted = 0) income\nfrom vm_host h",
|
||||
"refId": "A",
|
||||
"sql": {
|
||||
"columns": [
|
||||
{
|
||||
"parameters": [],
|
||||
"type": "function"
|
||||
}
|
||||
],
|
||||
"groupBy": [
|
||||
{
|
||||
"property": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "groupBy"
|
||||
}
|
||||
],
|
||||
"limit": 50
|
||||
}
|
||||
}
|
||||
],
|
||||
"title": "Host Allocation",
|
||||
"type": "table"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "mysql",
|
||||
"uid": "cegjfe9u9181sf"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "thresholds"
|
||||
},
|
||||
"custom": {
|
||||
"align": "auto",
|
||||
"cellOptions": {
|
||||
"type": "auto"
|
||||
},
|
||||
"inspect": false
|
||||
},
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 13,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 5
|
||||
},
|
||||
"id": 2,
|
||||
"options": {
|
||||
"cellHeight": "sm",
|
||||
"footer": {
|
||||
"countRows": false,
|
||||
"fields": "",
|
||||
"reducer": [
|
||||
"sum"
|
||||
],
|
||||
"show": false
|
||||
},
|
||||
"showHeader": true
|
||||
},
|
||||
"pluginVersion": "11.5.2",
|
||||
"targets": [
|
||||
{
|
||||
"dataset": "lnvps",
|
||||
"editorMode": "code",
|
||||
"format": "table",
|
||||
"rawQuery": true,
|
||||
"rawSql": "select\nv.id,\ndatediff(current_timestamp(), v.created) age,\ndatediff(v.expires, current_timestamp()) days_to_expire,\n(select sum((case when currency = 'BTC' then amount / 1e11 else amount end) * rate) from lnvps.vm_payment where vm_id = v.id and is_paid = 1) total_payments\nfrom vm v\nwhere v.deleted = 0\norder by 3 asc",
|
||||
"refId": "A",
|
||||
"sql": {
|
||||
"columns": [
|
||||
{
|
||||
"parameters": [],
|
||||
"type": "function"
|
||||
}
|
||||
],
|
||||
"groupBy": [
|
||||
{
|
||||
"property": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "groupBy"
|
||||
}
|
||||
],
|
||||
"limit": 50
|
||||
}
|
||||
}
|
||||
],
|
||||
"title": "Renewals",
|
||||
"type": "table"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "mysql",
|
||||
"uid": "cegjfe9u9181sf"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "thresholds"
|
||||
},
|
||||
"custom": {
|
||||
"align": "auto",
|
||||
"cellOptions": {
|
||||
"type": "auto"
|
||||
},
|
||||
"inspect": false
|
||||
},
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "id"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 49
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "ref_code"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 91
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "created"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "amount"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 71
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "currency"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 78
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 5
|
||||
},
|
||||
"id": 4,
|
||||
"options": {
|
||||
"cellHeight": "sm",
|
||||
"footer": {
|
||||
"countRows": false,
|
||||
"fields": "",
|
||||
"reducer": [
|
||||
"sum"
|
||||
],
|
||||
"show": false
|
||||
},
|
||||
"showHeader": true,
|
||||
"sortBy": []
|
||||
},
|
||||
"pluginVersion": "11.5.2",
|
||||
"targets": [
|
||||
{
|
||||
"dataset": "lnvps",
|
||||
"editorMode": "code",
|
||||
"format": "table",
|
||||
"rawQuery": true,
|
||||
"rawSql": "select v.id, \nv.ref_code, v.created, \n(case when vp.currency = 'BTC' then vp.amount / 1000 else vp.amount / 100 end) amount,\nvp.currency,\n(case when vp.currency = 'BTC' then vp.amount / 1000 else vp.amount / 100 end) * 0.33 comission\nfrom vm v, vm_payment vp\nwhere v.ref_code is not null\nand v.id = vp.vm_id\nand vp.is_paid = 1\norder by vp.created desc",
|
||||
"refId": "A",
|
||||
"sql": {
|
||||
"columns": [
|
||||
{
|
||||
"parameters": [],
|
||||
"type": "function"
|
||||
}
|
||||
],
|
||||
"groupBy": [
|
||||
{
|
||||
"property": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "groupBy"
|
||||
}
|
||||
],
|
||||
"limit": 50
|
||||
}
|
||||
}
|
||||
],
|
||||
"title": "RefCodes",
|
||||
"type": "table"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "mysql",
|
||||
"uid": "cegjfe9u9181sf"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "thresholds"
|
||||
},
|
||||
"custom": {
|
||||
"align": "auto",
|
||||
"cellOptions": {
|
||||
"type": "auto"
|
||||
},
|
||||
"inspect": false
|
||||
},
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 10,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 13
|
||||
},
|
||||
"id": 5,
|
||||
"options": {
|
||||
"cellHeight": "sm",
|
||||
"footer": {
|
||||
"countRows": false,
|
||||
"fields": "",
|
||||
"reducer": [
|
||||
"sum"
|
||||
],
|
||||
"show": false
|
||||
},
|
||||
"showHeader": true
|
||||
},
|
||||
"pluginVersion": "11.5.2",
|
||||
"targets": [
|
||||
{
|
||||
"dataset": "lnvps",
|
||||
"editorMode": "code",
|
||||
"format": "table",
|
||||
"rawQuery": true,
|
||||
"rawSql": "select vm_id, created, \n(case when currency = 'BTC' then (amount / 1e3) else amount / 100 end) amount, \n(case when currency = 'BTC' then (amount / 1e11) * rate else amount * rate end) amount_eur,\ncurrency,\n(case when payment_method = 0 then 'LN' else 'Revolut' end) method\nfrom vm_payment\nwhere is_paid = 1\norder by created desc\nlimit 20",
|
||||
"refId": "A",
|
||||
"sql": {
|
||||
"columns": [
|
||||
{
|
||||
"parameters": [],
|
||||
"type": "function"
|
||||
}
|
||||
],
|
||||
"groupBy": [
|
||||
{
|
||||
"property": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "groupBy"
|
||||
}
|
||||
],
|
||||
"limit": 50
|
||||
}
|
||||
}
|
||||
],
|
||||
"title": "Payments",
|
||||
"type": "table"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "mysql",
|
||||
"uid": "cegjfe9u9181sf"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "thresholds"
|
||||
},
|
||||
"custom": {
|
||||
"align": "auto",
|
||||
"cellOptions": {
|
||||
"type": "auto"
|
||||
},
|
||||
"inspect": false
|
||||
},
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "free"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "unit",
|
||||
"value": "percentunit"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "region"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 70
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "used"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 59
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "size"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 70
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "size"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "unit",
|
||||
"value": "sishort"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 5,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 18
|
||||
},
|
||||
"id": 1,
|
||||
"options": {
|
||||
"cellHeight": "sm",
|
||||
"footer": {
|
||||
"countRows": false,
|
||||
"fields": "",
|
||||
"reducer": [
|
||||
"sum"
|
||||
],
|
||||
"show": false
|
||||
},
|
||||
"showHeader": true,
|
||||
"sortBy": []
|
||||
},
|
||||
"pluginVersion": "11.5.2",
|
||||
"targets": [
|
||||
{
|
||||
"dataset": "lnvps",
|
||||
"datasource": {
|
||||
"type": "mysql",
|
||||
"uid": "cegjfe9u9181sf"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"format": "table",
|
||||
"rawQuery": true,
|
||||
"rawSql": "select i.cidr, i.region, i.used, i.size, (1-i.used/i.size) as free\nfrom (\nselect r.cidr, \n(select count(id) from lnvps.vm_ip_assignment where ip_range_id = r.id and deleted = 0) used,\nhr.name as region,\npow(2, (case when r.cidr like '%:%' then 128 else 32 end)-substring_index(r.cidr, '/', -1)) as size\nfrom ip_range r, vm_host_region hr\nwhere r.region_id = hr.id) i",
|
||||
"refId": "A",
|
||||
"sql": {
|
||||
"columns": [
|
||||
{
|
||||
"parameters": [],
|
||||
"type": "function"
|
||||
}
|
||||
],
|
||||
"groupBy": [
|
||||
{
|
||||
"property": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "groupBy"
|
||||
}
|
||||
],
|
||||
"limit": 50
|
||||
}
|
||||
}
|
||||
],
|
||||
"title": "IP Ranges",
|
||||
"type": "table"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "mysql",
|
||||
"uid": "cegjfe9u9181sf"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "palette-classic"
|
||||
},
|
||||
"custom": {
|
||||
"axisBorderShow": false,
|
||||
"axisCenteredZero": false,
|
||||
"axisColorMode": "text",
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"fillOpacity": 80,
|
||||
"gradientMode": "none",
|
||||
"hideFrom": {
|
||||
"legend": false,
|
||||
"tooltip": false,
|
||||
"viz": false
|
||||
},
|
||||
"lineWidth": 1,
|
||||
"scaleDistribution": {
|
||||
"type": "linear"
|
||||
},
|
||||
"thresholdsStyle": {
|
||||
"mode": "off"
|
||||
}
|
||||
},
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "amount_sats"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "unit",
|
||||
"value": "locale"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "amount_eur"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "unit",
|
||||
"value": "currencyEUR"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 23
|
||||
},
|
||||
"id": 6,
|
||||
"options": {
|
||||
"barRadius": 0,
|
||||
"barWidth": 0.97,
|
||||
"fullHighlight": false,
|
||||
"groupWidth": 0.7,
|
||||
"legend": {
|
||||
"calcs": [],
|
||||
"displayMode": "list",
|
||||
"placement": "bottom",
|
||||
"showLegend": true
|
||||
},
|
||||
"orientation": "auto",
|
||||
"showValue": "auto",
|
||||
"stacking": "none",
|
||||
"tooltip": {
|
||||
"hideZeros": false,
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
},
|
||||
"xTickLabelRotation": 0,
|
||||
"xTickLabelSpacing": 0
|
||||
},
|
||||
"pluginVersion": "11.5.2",
|
||||
"targets": [
|
||||
{
|
||||
"dataset": "lnvps",
|
||||
"editorMode": "code",
|
||||
"format": "table",
|
||||
"rawQuery": true,
|
||||
"rawSql": "select DATE_FORMAT(created, '%Y-%m') as month,\nsum((case when currency = 'BTC' then (amount / 1e3) else amount / 100 end)) amount_sats, \nsum((case when currency = 'BTC' then (amount / 1e11) * rate else amount * rate end)) amount_eur\nfrom vm_payment\nwhere is_paid = 1\ngroup by DATE_FORMAT(created, '%Y-%m')\norder by created asc\n",
|
||||
"refId": "A",
|
||||
"sql": {
|
||||
"columns": [
|
||||
{
|
||||
"parameters": [],
|
||||
"type": "function"
|
||||
}
|
||||
],
|
||||
"groupBy": [
|
||||
{
|
||||
"property": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "groupBy"
|
||||
}
|
||||
],
|
||||
"limit": 50
|
||||
}
|
||||
}
|
||||
],
|
||||
"title": "Income",
|
||||
"transformations": [
|
||||
{
|
||||
"id": "convertFieldType",
|
||||
"options": {
|
||||
"conversions": [
|
||||
{
|
||||
"destinationType": "time",
|
||||
"targetField": "month"
|
||||
}
|
||||
],
|
||||
"fields": {}
|
||||
}
|
||||
}
|
||||
],
|
||||
"type": "barchart"
|
||||
}
|
||||
],
|
||||
"preload": false,
|
||||
"refresh": "",
|
||||
"schemaVersion": 40,
|
||||
"tags": [],
|
||||
"templating": {
|
||||
"list": []
|
||||
},
|
||||
"time": {
|
||||
"from": "now-6h",
|
||||
"to": "now"
|
||||
},
|
||||
"timepicker": {
|
||||
"hidden": true
|
||||
},
|
||||
"timezone": "browser",
|
||||
"title": "LNVPS",
|
||||
"uid": "begjfxfrjwu80e",
|
||||
"version": 26,
|
||||
"weekStart": ""
|
||||
}
|
lnvps_api/Cargo.lock (generated, new file, 5314 lines)
File diff suppressed because it is too large.
lnvps_api/Cargo.toml (new file, 88 lines)
@@ -0,0 +1,88 @@
[package]
name = "lnvps_api"
version = "0.1.0"
edition = "2021"

[[bin]]
name = "lnvps_api"
path = "src/bin/api.rs"

[features]
default = [
    "mikrotik",
    "nostr-dm",
    "nostr-dvm",
    "nostr-domain",
    "proxmox",
    "lnd",
    "cloudflare",
    "revolut",
    "bitvora",
    "tokio/sync",
    "tokio/io-util"
]
mikrotik = ["dep:reqwest"]
nostr-dm = ["dep:nostr-sdk"]
nostr-dvm = ["dep:nostr-sdk"]
nostr-domain = ["lnvps_db/nostr-domain"]
proxmox = ["dep:reqwest", "dep:ssh2", "dep:tokio-tungstenite"]
libvirt = ["dep:virt", "dep:uuid", "dep:quick-xml"]
lnd = ["dep:fedimint-tonic-lnd"]
bitvora = ["dep:reqwest", "dep:tokio-stream"]
cloudflare = ["dep:reqwest"]
revolut = ["dep:reqwest", "dep:sha2", "dep:hmac"]

[dependencies]
lnvps_db = { path = "../lnvps_db" }
lnvps_common = { path = "../lnvps_common" }
anyhow.workspace = true
log.workspace = true
env_logger.workspace = true
tokio.workspace = true
config.workspace = true
serde.workspace = true
serde_json.workspace = true
rocket.workspace = true
hex.workspace = true
rocket_okapi = { version = "0.9.0", features = ["swagger"] }
schemars = { version = "0.8.22", features = ["chrono"] }
chrono = { version = "0.4.38", features = ["serde"] }
base64 = { version = "0.22.1", features = ["alloc"] }
urlencoding = "2.1.3"
ipnetwork = { git = "https://git.v0l.io/Kieran/ipnetwork.git", rev = "35977adc8103cfc232bc95fbc32f4e34f2b6a6d7" }
rand = "0.9.0"
clap = { version = "4.5.21", features = ["derive"] }
ssh-key = "0.6.7"
lettre = { version = "0.11.10", features = ["tokio1-native-tls"] }
ws = { package = "rocket_ws", version = "0.1.1" }
native-tls = "0.2.12"
lnurl-rs = { version = "0.9.0", default-features = false }
mustache = "0.9.0"

futures = "0.3.31"
isocountry = "0.3.2"

#nostr-dm
nostr = { version = "0.40.0", default-features = false, features = ["std"] }
nostr-sdk = { version = "0.40.0", optional = true, default-features = false, features = ["nip44", "nip59"] }

#proxmox
tokio-tungstenite = { version = "^0.21", features = ["native-tls"], optional = true }
ssh2 = { version = "0.9.4", optional = true }
reqwest = { version = "0.12.8", optional = true }

#libvirt
virt = { git = "https://gitlab.com/libvirt/libvirt-rust.git", optional = true }
#virtxml = {git = "https://gitlab.com/libvirt/libvirt-rust-xml.git", optional = true}
uuid = { version = "1.16.0", features = ["v4", "serde"], optional = true }
quick-xml = { version = "0.37.3", features = ["serde", "serialize"], optional = true }

#lnd
fedimint-tonic-lnd = { version = "0.2.0", default-features = false, features = ["invoicesrpc"], optional = true }

#bitvora
tokio-stream = { version = "0.1.17", features = ["sync"], optional = true }

#revolut
sha2 = { version = "0.10.8", optional = true }
hmac = { version = "0.12.1", optional = true }
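The new `revolut` feature pulls in `sha2` and `hmac`, presumably for verifying webhook signatures from RevolutPay. A minimal sketch of HMAC-SHA256 verification with those two crates plus the workspace `hex` dependency, assuming the signature arrives hex-encoded; the key and header handling here are illustrative, not the project's actual Revolut integration:

```rust
use hmac::{Hmac, Mac};
use sha2::Sha256;

type HmacSha256 = Hmac<Sha256>;

/// Returns true when `signature_hex` is a valid HMAC-SHA256 of `payload` under `secret`.
fn verify_webhook(secret: &[u8], payload: &[u8], signature_hex: &str) -> bool {
    let Ok(expected) = hex::decode(signature_hex) else {
        return false; // not valid hex
    };
    let mut mac = HmacSha256::new_from_slice(secret).expect("HMAC accepts any key length");
    mac.update(payload);
    // verify_slice performs a constant-time comparison.
    mac.verify_slice(&expected).is_ok()
}
```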
@@ -5,6 +5,7 @@ lightning:
    cert: "/home/kieran/.polar/networks/2/volumes/lnd/alice/tls.cert"
    macaroon: "/home/kieran/.polar/networks/2/volumes/lnd/alice/data/chain/bitcoin/regtest/admin.macaroon"
delete-after: 3
public-url: "https://api.lnvps.net"
provisioner:
  proxmox:
    read-only: false
lnvps_api/invoice.html (new file, 161 lines)
@ -0,0 +1,161 @@
|
||||
<!doctype html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<title>{{payment.id}}</title>
|
||||
<meta charset="UTF-8"/>
|
||||
<link rel="preconnect" href="https://fonts.googleapis.com"/>
|
||||
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin/>
|
||||
<link
|
||||
href="https://fonts.googleapis.com/css2?family=Source+Code+Pro:ital,wght@0,200..900;1,200..900&display=swap"
|
||||
rel="stylesheet"
|
||||
/>
|
||||
<style>
|
||||
html, body {
|
||||
margin: 0;
|
||||
font-size: 12px;
|
||||
font-family: "Source Code Pro", monospace;
|
||||
}
|
||||
|
||||
@media screen {
|
||||
.page {
|
||||
margin-left: 4rem;
|
||||
margin-right: 4rem;
|
||||
}
|
||||
}
|
||||
|
||||
.header {
|
||||
display: flex;
|
||||
gap: 2rem;
|
||||
align-items: center;
|
||||
justify-content: space-between;
|
||||
font-size: 3rem;
|
||||
margin: 2rem 0;
|
||||
}
|
||||
|
||||
.billing {
|
||||
display: grid;
|
||||
grid-template-columns: 1fr 1fr;
|
||||
}
|
||||
|
||||
.flex-col {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 0.2rem;
|
||||
}
|
||||
|
||||
table {
|
||||
width: 100%;
|
||||
border-collapse: collapse;
|
||||
}
|
||||
|
||||
td, th {
|
||||
border: 1px solid #ccc;
|
||||
padding: 0.4em 0.1em;
|
||||
}
|
||||
|
||||
.total {
|
||||
text-align: end;
|
||||
font-size: 16px;
|
||||
font-weight: bold;
|
||||
padding: 0.5em 0.2em;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div class="page">
|
||||
<div class="header">
|
||||
LNVPS
|
||||
<img height="48" width="48" src="https://lnvps.net/logo.jpg" alt="logo"/>
|
||||
</div>
|
||||
<hr/>
|
||||
<h2>Invoice</h2>
|
||||
<div class="flex-col">
|
||||
<div>
|
||||
<b>ID:</b>
|
||||
{{payment.id}}
|
||||
</div>
|
||||
<div>
|
||||
<b>Date:</b>
|
||||
{{payment.created}}
|
||||
</div>
|
||||
<div>
|
||||
<b>Status:</b>
|
||||
{{#payment.is_paid}}Paid{{/payment.is_paid}}
|
||||
{{^payment.is_paid}}Unpaid{{/payment.is_paid}}
|
||||
</div>
|
||||
<div>
|
||||
<b>Nostr Pubkey:</b>
|
||||
{{npub}}
|
||||
</div>
|
||||
</div>
|
||||
<div class="billing">
|
||||
<div class="flex-col">
|
||||
<h2>Bill To:</h2>
|
||||
<div>{{user.name}}</div>
|
||||
<div>{{user.address_1}}</div>
|
||||
<div>{{user.address_2}}</div>
|
||||
<div>{{user.city}}</div>
|
||||
<div>{{user.state}}</div>
|
||||
<div>{{user.postcode}}</div>
|
||||
<div>{{user.country}}</div>
|
||||
<div>{{user.country_code}}</div>
|
||||
<div>{{user.tax_id}}</div>
|
||||
</div>
|
||||
{{#company}}
|
||||
<div class="flex-col">
|
||||
<h2> </h2>
|
||||
<div>{{company.name}}</div>
|
||||
<div>{{company.address_1}}</div>
|
||||
<div>{{company.address_2}}</div>
|
||||
<div>{{company.city}}</div>
|
||||
<div>{{company.state}}</div>
|
||||
<div>{{company.postcode}}</div>
|
||||
<div>{{company.country}}</div>
|
||||
<div>{{company.country_code}}</div>
|
||||
<div>{{company.tax_id}}</div>
|
||||
</div>
|
||||
{{/company}}
|
||||
</div>
|
||||
<hr/>
|
||||
<h2>Details:</h2>
|
||||
<table>
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Description</th>
|
||||
<th>Currency</th>
|
||||
<th>Gross</th>
|
||||
<th>Taxes</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td>
|
||||
VM Renewal #{{vm.id}}
|
||||
- {{vm.template.name}}
|
||||
- {{vm.image.distribution}} {{vm.image.version}}
|
||||
- {{payment.time}} seconds
|
||||
</td>
|
||||
<td>{{payment.currency}}</td>
|
||||
<td>{{payment.amount}}</td>
|
||||
<td>{{payment.tax}}</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td colspan="4" class="total">
|
||||
Total: {{total}}
|
||||
</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
<br/>
|
||||
<b>
|
||||
All BTC amounts are in milli-satoshis and all fiat amounts are in cents.
|
||||
</b>
|
||||
<hr/>
|
||||
<small>
|
||||
(c) {{year}} LNVPS.net - Generated at {{current_date}}
|
||||
</small>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
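`invoice.html` above is a Mustache template (`{{payment.id}}`, `{{#company}}…{{/company}}`), matching the `mustache = "0.9.0"` dependency added in `lnvps_api/Cargo.toml`. A minimal sketch of rendering such a template with that crate; the data shape is a small illustrative struct with a flat context (the real template uses nested names like `{{payment.id}}`), not the project's actual invoice model:

```rust
use serde::Serialize;

#[derive(Serialize)]
struct InvoiceData {
    id: String,
    is_paid: bool,
    year: u32,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // In the real code the template text would be loaded from invoice.html.
    let template = mustache::compile_str(
        "Invoice {{id}} - {{#is_paid}}Paid{{/is_paid}}{{^is_paid}}Unpaid{{/is_paid}} ({{year}})",
    )?;
    let html = template.render_to_string(&InvoiceData {
        id: "abc123".into(),
        is_paid: true,
        year: 2025,
    })?;
    println!("{html}");
    Ok(())
}
```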
@@ -1,6 +1,8 @@
use rocket::Route;

mod model;
#[cfg(feature = "nostr-domain")]
mod nostr_domain;
mod routes;
mod webhook;

@@ -10,4 +12,5 @@ pub fn routes() -> Vec<Route> {
    r
}

pub use webhook::WebhookMessage;
pub use webhook::WEBHOOK_BRIDGE;
@ -1,20 +1,21 @@
|
||||
use crate::exchange::{alt_prices, Currency, CurrencyAmount, ExchangeRateService};
|
||||
use crate::provisioner::{PricingData, PricingEngine};
|
||||
use crate::provisioner::PricingEngine;
|
||||
use crate::status::VmState;
|
||||
use anyhow::{anyhow, bail, Context, Result};
|
||||
use anyhow::{anyhow, bail, Result};
|
||||
use chrono::{DateTime, Utc};
|
||||
use ipnetwork::IpNetwork;
|
||||
use lnvps_db::{
|
||||
LNVpsDb, Vm, VmCostPlan, VmCustomPricing, VmCustomPricingDisk, VmCustomTemplate, VmHost,
|
||||
LNVpsDb, PaymentMethod, Vm, VmCostPlan, VmCustomPricing, VmCustomPricingDisk, VmCustomTemplate,
|
||||
VmHostRegion, VmTemplate,
|
||||
};
|
||||
use nostr::util::hex;
|
||||
use schemars::JsonSchema;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
|
||||
#[derive(Serialize, Deserialize, JsonSchema)]
|
||||
#[derive(Serialize, JsonSchema)]
|
||||
pub struct ApiVmStatus {
|
||||
/// Unique VM ID (Same in proxmox)
|
||||
pub id: u64,
|
||||
@ -36,7 +37,7 @@ pub struct ApiVmStatus {
|
||||
pub status: VmState,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, JsonSchema)]
|
||||
#[derive(Serialize, JsonSchema)]
|
||||
pub struct ApiUserSshKey {
|
||||
pub id: u64,
|
||||
pub name: String,
|
||||
@ -53,7 +54,7 @@ impl From<lnvps_db::UserSshKey> for ApiUserSshKey {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, JsonSchema)]
|
||||
#[derive(Serialize, JsonSchema)]
|
||||
pub struct ApiVmIpAssignment {
|
||||
pub id: u64,
|
||||
pub ip: String,
|
||||
@ -79,7 +80,7 @@ impl ApiVmIpAssignment {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Serialize, Deserialize, JsonSchema)]
|
||||
#[derive(Clone, Copy, Serialize, Deserialize, JsonSchema, PartialEq, Eq, Hash)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum DiskType {
|
||||
HDD = 0,
|
||||
@ -95,16 +96,16 @@ impl From<lnvps_db::DiskType> for DiskType {
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<lnvps_db::DiskType> for DiskType {
|
||||
fn into(self) -> lnvps_db::DiskType {
|
||||
match self {
|
||||
impl From<DiskType> for lnvps_db::DiskType {
|
||||
fn from(val: DiskType) -> Self {
|
||||
match val {
|
||||
DiskType::HDD => lnvps_db::DiskType::HDD,
|
||||
DiskType::SSD => lnvps_db::DiskType::SSD,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Serialize, Deserialize, JsonSchema)]
|
||||
#[derive(Clone, Copy, Serialize, Deserialize, JsonSchema, PartialEq, Eq, Hash)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum DiskInterface {
|
||||
SATA = 0,
|
||||
@ -132,7 +133,7 @@ impl From<DiskInterface> for lnvps_db::DiskInterface {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, JsonSchema)]
|
||||
#[derive(Serialize, JsonSchema)]
|
||||
pub struct ApiTemplatesResponse {
|
||||
pub templates: Vec<ApiVmTemplate>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
@ -143,19 +144,20 @@ impl ApiTemplatesResponse {
|
||||
pub async fn expand_pricing(&mut self, rates: &Arc<dyn ExchangeRateService>) -> Result<()> {
|
||||
let rates = rates.list_rates().await?;
|
||||
|
||||
for mut template in &mut self.templates {
|
||||
let list_price = CurrencyAmount(template.cost_plan.currency, template.cost_plan.amount);
|
||||
for template in &mut self.templates {
|
||||
let list_price =
|
||||
CurrencyAmount::from_f32(template.cost_plan.currency, template.cost_plan.amount);
|
||||
for alt_price in alt_prices(&rates, list_price) {
|
||||
template.cost_plan.other_price.push(ApiPrice {
|
||||
currency: alt_price.0,
|
||||
amount: alt_price.1,
|
||||
currency: alt_price.currency(),
|
||||
amount: alt_price.value_f32(),
|
||||
});
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
#[derive(Serialize, Deserialize, JsonSchema)]
|
||||
#[derive(Serialize, JsonSchema)]
|
||||
pub struct ApiCustomTemplateParams {
|
||||
pub id: u64,
|
||||
pub name: String,
|
||||
@ -164,8 +166,6 @@ pub struct ApiCustomTemplateParams {
|
||||
pub min_cpu: u16,
|
||||
pub min_memory: u64,
|
||||
pub max_memory: u64,
|
||||
pub min_disk: u64,
|
||||
pub max_disk: u64,
|
||||
pub disks: Vec<ApiCustomTemplateDiskParam>,
|
||||
}
|
||||
|
||||
@ -176,7 +176,7 @@ impl ApiCustomTemplateParams {
|
||||
region: &VmHostRegion,
|
||||
max_cpu: u16,
|
||||
max_memory: u64,
|
||||
max_disk: u64,
|
||||
max_disk: &HashMap<(DiskType, DiskInterface), u64>,
|
||||
) -> Result<Self> {
|
||||
const GB: u64 = 1024 * 1024 * 1024;
|
||||
Ok(ApiCustomTemplateParams {
|
||||
@ -190,14 +190,16 @@ impl ApiCustomTemplateParams {
|
||||
min_cpu: 1,
|
||||
min_memory: GB,
|
||||
max_memory,
|
||||
min_disk: GB * 5,
|
||||
max_disk,
|
||||
disks: disks
|
||||
.iter()
|
||||
.filter(|d| d.pricing_id == pricing.id)
|
||||
.map(|d| ApiCustomTemplateDiskParam {
|
||||
disk_type: d.kind.into(),
|
||||
disk_interface: d.interface.into(),
|
||||
.filter_map(|d| {
|
||||
Some(ApiCustomTemplateDiskParam {
|
||||
min_disk: GB * 5,
|
||||
max_disk: *max_disk.get(&(d.kind.into(), d.interface.into()))?,
|
||||
disk_type: d.kind.into(),
|
||||
disk_interface: d.interface.into(),
|
||||
})
|
||||
})
|
||||
.collect(),
|
||||
})
|
||||
@ -205,6 +207,8 @@ impl ApiCustomTemplateParams {
|
||||
}
|
||||
#[derive(Clone, Serialize, Deserialize, JsonSchema)]
|
||||
pub struct ApiCustomTemplateDiskParam {
|
||||
pub min_disk: u64,
|
||||
pub max_disk: u64,
|
||||
pub disk_type: DiskType,
|
||||
pub disk_interface: DiskInterface,
|
||||
}
|
||||
@ -251,8 +255,8 @@ pub struct ApiPrice {
|
||||
impl From<CurrencyAmount> for ApiPrice {
|
||||
fn from(value: CurrencyAmount) -> Self {
|
||||
Self {
|
||||
currency: value.0,
|
||||
amount: value.1,
|
||||
currency: value.currency(),
|
||||
amount: value.value_f32(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -335,8 +339,8 @@ impl ApiVmTemplate {
|
||||
cpu: template.cpu,
|
||||
memory: template.memory,
|
||||
disk_size: template.disk_size,
|
||||
disk_type: template.disk_type.clone().into(),
|
||||
disk_interface: template.disk_interface.clone().into(),
|
||||
disk_type: template.disk_type.into(),
|
||||
disk_interface: template.disk_interface.into(),
|
||||
cost_plan: ApiVmCostPlan {
|
||||
id: cost_plan.id,
|
||||
name: cost_plan.name.clone(),
|
||||
@ -399,9 +403,44 @@ pub struct VMPatchRequest {
|
||||
|
||||
#[derive(Serialize, Deserialize, JsonSchema)]
|
||||
pub struct AccountPatchRequest {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub email: Option<String>,
|
||||
pub contact_nip17: bool,
|
||||
pub contact_email: bool,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub country_code: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub name: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub address_1: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub address_2: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub state: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub city: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub postcode: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub tax_id: Option<String>,
|
||||
}
|
||||
|
||||
impl From<lnvps_db::User> for AccountPatchRequest {
|
||||
fn from(user: lnvps_db::User) -> Self {
|
||||
AccountPatchRequest {
|
||||
email: user.email,
|
||||
contact_nip17: user.contact_nip17,
|
||||
contact_email: user.contact_email,
|
||||
country_code: user.country_code,
|
||||
name: user.billing_name,
|
||||
address_1: user.billing_address_1,
|
||||
address_2: user.billing_address_2,
|
||||
state: user.billing_state,
|
||||
city: user.billing_city,
|
||||
postcode: user.billing_postcode,
|
||||
tax_id: user.billing_tax_id,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, JsonSchema)]
|
||||
@ -452,6 +491,7 @@ pub struct ApiVmOsImage {
|
||||
pub flavour: String,
|
||||
pub version: String,
|
||||
pub release_date: DateTime<Utc>,
|
||||
pub default_username: Option<String>,
|
||||
}
|
||||
|
||||
impl From<lnvps_db::VmOsImage> for ApiVmOsImage {
|
||||
@ -462,20 +502,23 @@ impl From<lnvps_db::VmOsImage> for ApiVmOsImage {
|
||||
flavour: image.flavour,
|
||||
version: image.version,
|
||||
release_date: image.release_date,
|
||||
default_username: image.default_username,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, JsonSchema)]
|
||||
pub struct ApiVmPayment {
|
||||
/// Payment hash hex
|
||||
pub id: String,
|
||||
pub vm_id: u64,
|
||||
pub created: DateTime<Utc>,
|
||||
pub expires: DateTime<Utc>,
|
||||
pub amount: u64,
|
||||
pub invoice: String,
|
||||
pub tax: u64,
|
||||
pub currency: String,
|
||||
pub is_paid: bool,
|
||||
pub data: ApiPaymentData,
|
||||
pub time: u64,
|
||||
}
|
||||
|
||||
impl From<lnvps_db::VmPayment> for ApiVmPayment {
|
||||
@ -486,8 +529,108 @@ impl From<lnvps_db::VmPayment> for ApiVmPayment {
|
||||
created: value.created,
|
||||
expires: value.expires,
|
||||
amount: value.amount,
|
||||
invoice: value.invoice,
|
||||
tax: value.tax,
|
||||
currency: value.currency,
|
||||
is_paid: value.is_paid,
|
||||
time: value.time_value,
|
||||
data: match &value.payment_method {
|
||||
PaymentMethod::Lightning => ApiPaymentData::Lightning(value.external_data),
|
||||
PaymentMethod::Revolut => {
|
||||
#[derive(Deserialize)]
|
||||
struct RevolutData {
|
||||
pub token: String,
|
||||
}
|
||||
let data: RevolutData = serde_json::from_str(&value.external_data).unwrap();
|
||||
ApiPaymentData::Revolut { token: data.token }
|
||||
}
|
||||
PaymentMethod::Paypal => {
|
||||
todo!()
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, JsonSchema)]
|
||||
pub struct ApiPaymentInfo {
|
||||
pub name: ApiPaymentMethod,
|
||||
|
||||
#[serde(skip_serializing_if = "HashMap::is_empty")]
|
||||
pub metadata: HashMap<String, String>,
|
||||
|
||||
pub currencies: Vec<Currency>,
|
||||
}
|
||||
|
||||
/// Payment data related to the payment method
|
||||
#[derive(Serialize, Deserialize, JsonSchema)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum ApiPaymentData {
|
||||
/// Just an LN invoice
|
||||
Lightning(String),
|
||||
/// Revolut order data
|
||||
Revolut {
|
||||
/// Order token
|
||||
token: String,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, Default, Serialize, Deserialize, JsonSchema)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum ApiPaymentMethod {
|
||||
#[default]
|
||||
Lightning,
|
||||
Revolut,
|
||||
Paypal,
|
||||
}
|
||||
|
||||
impl From<PaymentMethod> for ApiPaymentMethod {
|
||||
fn from(value: PaymentMethod) -> Self {
|
||||
match value {
|
||||
PaymentMethod::Lightning => ApiPaymentMethod::Lightning,
|
||||
PaymentMethod::Revolut => ApiPaymentMethod::Revolut,
|
||||
PaymentMethod::Paypal => ApiPaymentMethod::Paypal,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, JsonSchema)]
|
||||
pub struct ApiCompany {
|
||||
pub id: u64,
|
||||
pub name: String,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub email: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub country_code: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub address_1: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub address_2: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub state: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub city: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub postcode: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub tax_id: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub phone: Option<String>,
|
||||
}
|
||||
|
||||
impl From<lnvps_db::Company> for ApiCompany {
|
||||
fn from(value: lnvps_db::Company) -> Self {
|
||||
Self {
|
||||
email: value.email,
|
||||
country_code: value.country_code,
|
||||
name: value.name,
|
||||
id: value.id,
|
||||
address_1: value.address_1,
|
||||
address_2: value.address_2,
|
||||
state: value.state,
|
||||
city: value.city,
|
||||
postcode: value.postcode,
|
||||
tax_id: value.tax_id,
|
||||
phone: value.phone,
|
||||
}
|
||||
}
|
||||
}
|
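The changes to the API model above replace tuple-field access on `CurrencyAmount` (`value.0`, `value.1`) with a constructor and accessors (`CurrencyAmount::from_f32`, `.currency()`, `.value_f32()`). A minimal sketch of what such an API can look like, with an illustrative internal representation; the real type may store amounts differently (for example in millisats for BTC):

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
enum Currency {
    BTC,
    EUR,
    USD,
}

/// Illustrative currency/amount pair exposing accessors instead of public tuple fields.
#[derive(Clone, Copy, Debug)]
struct CurrencyAmount(Currency, u64); // amount kept in smallest units here (assumption)

impl CurrencyAmount {
    fn from_f32(currency: Currency, amount: f32) -> Self {
        // Assumption: two decimal places; BTC precision would be handled separately.
        CurrencyAmount(currency, (amount * 100.0).round() as u64)
    }
    fn currency(&self) -> Currency {
        self.0
    }
    fn value_f32(&self) -> f32 {
        self.1 as f32 / 100.0
    }
}

fn main() {
    let price = CurrencyAmount::from_f32(Currency::EUR, 12.34);
    assert_eq!(price.currency(), Currency::EUR);
    assert!((price.value_f32() - 12.34).abs() < 0.005);
}
```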
lnvps_api/src/api/nostr_domain.rs (new file, 202 lines)
@ -0,0 +1,202 @@
|
||||
use crate::api::routes::{ApiData, ApiResult};
|
||||
use crate::nip98::Nip98Auth;
|
||||
use crate::settings::Settings;
|
||||
use chrono::{DateTime, Utc};
|
||||
use lnvps_db::{LNVPSNostrDb, LNVpsDb, NostrDomain, NostrDomainHandle};
|
||||
use rocket::serde::json::Json;
|
||||
use rocket::serde::{Deserialize, Serialize};
|
||||
use rocket::{delete, get, post, routes, Route, State};
|
||||
use rocket_okapi::okapi::openapi3::OpenApi;
|
||||
use rocket_okapi::settings::OpenApiSettings;
|
||||
use rocket_okapi::{openapi, openapi_get_routes, openapi_routes, JsonSchema};
|
||||
use std::sync::Arc;
|
||||
|
||||
pub fn routes() -> Vec<Route> {
|
||||
routes![
|
||||
v1_nostr_domains,
|
||||
v1_create_nostr_domain,
|
||||
v1_list_nostr_domain_handles,
|
||||
v1_create_nostr_domain_handle,
|
||||
v1_delete_nostr_domain_handle
|
||||
]
|
||||
}
|
||||
|
||||
#[openapi(tag = "NIP05")]
|
||||
#[get("/api/v1/nostr/domain")]
|
||||
async fn v1_nostr_domains(
|
||||
auth: Nip98Auth,
|
||||
db: &State<Arc<dyn LNVpsDb>>,
|
||||
settings: &State<Settings>,
|
||||
) -> ApiResult<ApiDomainsResponse> {
|
||||
let pubkey = auth.event.pubkey.to_bytes();
|
||||
let uid = db.upsert_user(&pubkey).await?;
|
||||
|
||||
let domains = db.list_domains(uid).await?;
|
||||
ApiData::ok(ApiDomainsResponse {
|
||||
domains: domains.into_iter().map(|d| d.into()).collect(),
|
||||
cname: settings.nostr_address_host.clone().unwrap_or_default(),
|
||||
})
|
||||
}
|
||||
|
||||
#[openapi(tag = "NIP05")]
|
||||
#[post("/api/v1/nostr/domain", format = "json", data = "<data>")]
|
||||
async fn v1_create_nostr_domain(
|
||||
auth: Nip98Auth,
|
||||
db: &State<Arc<dyn LNVpsDb>>,
|
||||
data: Json<NameRequest>,
|
||||
) -> ApiResult<ApiNostrDomain> {
|
||||
let pubkey = auth.event.pubkey.to_bytes();
|
||||
let uid = db.upsert_user(&pubkey).await?;
|
||||
|
||||
let mut dom = NostrDomain {
|
||||
owner_id: uid,
|
||||
name: data.name.clone(),
|
||||
..Default::default()
|
||||
};
|
||||
let dom_id = db.insert_domain(&dom).await?;
|
||||
dom.id = dom_id;
|
||||
|
||||
ApiData::ok(dom.into())
|
||||
}
|
||||
|
||||
#[openapi(tag = "NIP05")]
|
||||
#[get("/api/v1/nostr/domain/<dom>/handle")]
|
||||
async fn v1_list_nostr_domain_handles(
|
||||
auth: Nip98Auth,
|
||||
db: &State<Arc<dyn LNVpsDb>>,
|
||||
dom: u64,
|
||||
) -> ApiResult<Vec<ApiNostrDomainHandle>> {
|
||||
let pubkey = auth.event.pubkey.to_bytes();
|
||||
let uid = db.upsert_user(&pubkey).await?;
|
||||
|
||||
let domain = db.get_domain(dom).await?;
|
||||
if domain.owner_id != uid {
|
||||
return ApiData::err("Access denied");
|
||||
}
|
||||
|
||||
let handles = db.list_handles(domain.id).await?;
|
||||
ApiData::ok(handles.into_iter().map(|h| h.into()).collect())
|
||||
}
|
||||
|
||||
#[openapi(tag = "NIP05")]
|
||||
#[post("/api/v1/nostr/domain/<dom>/handle", format = "json", data = "<data>")]
|
||||
async fn v1_create_nostr_domain_handle(
|
||||
auth: Nip98Auth,
|
||||
db: &State<Arc<dyn LNVpsDb>>,
|
||||
dom: u64,
|
||||
data: Json<HandleRequest>,
|
||||
) -> ApiResult<ApiNostrDomainHandle> {
|
||||
let pubkey = auth.event.pubkey.to_bytes();
|
||||
let uid = db.upsert_user(&pubkey).await?;
|
||||
|
||||
let domain = db.get_domain(dom).await?;
|
||||
if domain.owner_id != uid {
|
||||
return ApiData::err("Access denied");
|
||||
}
|
||||
|
||||
let h_pubkey = hex::decode(&data.pubkey)?;
|
||||
if h_pubkey.len() != 32 {
|
||||
return ApiData::err("Invalid public key");
|
||||
}
|
||||
|
||||
let mut handle = NostrDomainHandle {
|
||||
domain_id: domain.id,
|
||||
handle: data.name.clone(),
|
||||
pubkey: h_pubkey,
|
||||
..Default::default()
|
||||
};
|
||||
let id = db.insert_handle(&handle).await?;
|
||||
handle.id = id;
|
||||
|
||||
ApiData::ok(handle.into())
|
||||
}
|
||||
|
||||
#[openapi(tag = "NIP05")]
|
||||
#[delete("/api/v1/nostr/domain/<dom>/handle/<handle>")]
|
||||
async fn v1_delete_nostr_domain_handle(
|
||||
auth: Nip98Auth,
|
||||
db: &State<Arc<dyn LNVpsDb>>,
|
||||
dom: u64,
|
||||
handle: u64,
|
||||
) -> ApiResult<()> {
|
||||
let pubkey = auth.event.pubkey.to_bytes();
|
||||
let uid = db.upsert_user(&pubkey).await?;
|
||||
|
||||
let domain = db.get_domain(dom).await?;
|
||||
if domain.owner_id != uid {
|
||||
return ApiData::err("Access denied");
|
||||
}
|
||||
db.delete_handle(handle).await?;
|
||||
ApiData::ok(())
|
||||
}
|
||||
|
||||
#[derive(Deserialize, JsonSchema)]
|
||||
struct NameRequest {
|
||||
pub name: String,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, JsonSchema)]
|
||||
struct HandleRequest {
|
||||
pub pubkey: String,
|
||||
pub name: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize, JsonSchema)]
|
||||
struct ApiNostrDomain {
|
||||
pub id: u64,
|
||||
pub name: String,
|
||||
pub enabled: bool,
|
||||
pub handles: u64,
|
||||
pub created: DateTime<Utc>,
|
||||
pub relays: Vec<String>,
|
||||
}
|
||||
|
||||
impl From<NostrDomain> for ApiNostrDomain {
|
||||
fn from(value: NostrDomain) -> Self {
|
||||
Self {
|
||||
id: value.id,
|
||||
name: value.name,
|
||||
enabled: value.enabled,
|
||||
handles: value.handles as u64,
|
||||
created: value.created,
|
||||
relays: if let Some(r) = value.relays {
|
||||
r.split(',').map(|s| s.to_string()).collect()
|
||||
} else {
|
||||
vec![]
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, JsonSchema)]
|
||||
struct ApiNostrDomainHandle {
|
||||
pub id: u64,
|
||||
pub domain_id: u64,
|
||||
pub handle: String,
|
||||
pub created: DateTime<Utc>,
|
||||
pub pubkey: String,
|
||||
pub relays: Vec<String>,
|
||||
}
|
||||
|
||||
impl From<NostrDomainHandle> for ApiNostrDomainHandle {
|
||||
fn from(value: NostrDomainHandle) -> Self {
|
||||
Self {
|
||||
id: value.id,
|
||||
domain_id: value.domain_id,
|
||||
created: value.created,
|
||||
handle: value.handle,
|
||||
pubkey: hex::encode(value.pubkey),
|
||||
relays: if let Some(r) = value.relays {
|
||||
r.split(',').map(|s| s.to_string()).collect()
|
||||
} else {
|
||||
vec![]
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, JsonSchema)]
|
||||
struct ApiDomainsResponse {
|
||||
pub domains: Vec<ApiNostrDomain>,
|
||||
pub cname: String,
|
||||
}
|
@ -1,36 +1,50 @@
|
||||
use crate::api::model::{
|
||||
AccountPatchRequest, ApiCustomTemplateDiskParam, ApiCustomTemplateParams, ApiCustomVmOrder,
|
||||
ApiCustomVmRequest, ApiPrice, ApiTemplatesResponse, ApiUserSshKey, ApiVmHostRegion,
|
||||
AccountPatchRequest, ApiCompany, ApiCustomTemplateParams, ApiCustomVmOrder, ApiCustomVmRequest,
|
||||
ApiPaymentInfo, ApiPaymentMethod, ApiPrice, ApiTemplatesResponse, ApiUserSshKey,
|
||||
ApiVmIpAssignment, ApiVmOsImage, ApiVmPayment, ApiVmStatus, ApiVmTemplate, CreateSshKey,
|
||||
CreateVmRequest, VMPatchRequest,
|
||||
};
|
||||
use crate::exchange::ExchangeRateService;
|
||||
use crate::exchange::{Currency, CurrencyAmount, ExchangeRateService};
|
||||
use crate::host::{get_host_client, FullVmInfo, TimeSeries, TimeSeriesData};
|
||||
use crate::nip98::Nip98Auth;
|
||||
use crate::provisioner::{HostCapacityService, LNVpsProvisioner, PricingEngine};
|
||||
use crate::settings::Settings;
|
||||
use crate::status::{VmState, VmStateCache};
|
||||
use crate::worker::WorkJob;
|
||||
use anyhow::{Context, Result};
|
||||
use anyhow::{bail, Result};
|
||||
use chrono::{DateTime, Datelike, Utc};
|
||||
use futures::future::join_all;
|
||||
use lnvps_db::{IpRange, LNVpsDb, VmCustomPricing, VmCustomPricingDisk, VmCustomTemplate};
|
||||
use futures::{SinkExt, StreamExt};
|
||||
use isocountry::CountryCode;
|
||||
use lnurl::pay::{LnURLPayInvoice, PayResponse};
|
||||
use lnurl::Tag;
|
||||
use lnvps_db::{
|
||||
IpRange, LNVpsDb, PaymentMethod, VmCustomPricing, VmCustomPricingDisk, VmCustomTemplate,
|
||||
};
|
||||
use log::{error, info};
|
||||
use nostr::util::hex;
|
||||
use rocket::futures::{SinkExt, StreamExt};
|
||||
use nostr::{ToBech32, Url};
|
||||
use rocket::http::ContentType;
|
||||
use rocket::serde::json::Json;
|
||||
use rocket::{get, patch, post, Responder, Route, State};
|
||||
use rocket::{get, patch, post, routes, Responder, Route, State};
|
||||
use rocket_okapi::gen::OpenApiGenerator;
|
||||
use rocket_okapi::okapi::openapi3::Responses;
|
||||
use rocket_okapi::response::OpenApiResponderInner;
|
||||
use rocket_okapi::{openapi, openapi_get_routes};
|
||||
use rocket_okapi::{openapi, openapi_get_routes, openapi_routes};
|
||||
use schemars::JsonSchema;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use ssh_key::PublicKey;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::fmt::Display;
|
||||
use std::io::{BufWriter, Cursor};
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::mpsc::UnboundedSender;
|
||||
use tokio::sync::mpsc::{Sender, UnboundedSender};
|
||||
|
||||
pub fn routes() -> Vec<Route> {
|
||||
openapi_get_routes![
|
||||
let mut routes = vec![];
|
||||
|
||||
let mut api_routes = openapi_get_routes![
|
||||
v1_get_account,
|
||||
v1_patch_account,
|
||||
v1_list_vms,
|
||||
@ -45,17 +59,32 @@ pub fn routes() -> Vec<Route> {
|
||||
v1_start_vm,
|
||||
v1_stop_vm,
|
||||
v1_restart_vm,
|
||||
v1_reinstall_vm,
|
||||
v1_patch_vm,
|
||||
v1_time_series,
|
||||
v1_custom_template_calc,
|
||||
v1_create_custom_vm_order
|
||||
]
|
||||
v1_create_custom_vm_order,
|
||||
v1_get_payment_methods,
|
||||
v1_payment_history
|
||||
];
|
||||
#[cfg(feature = "nostr-domain")]
|
||||
api_routes.append(&mut super::nostr_domain::routes());
|
||||
routes.append(&mut api_routes);
|
||||
|
||||
routes.append(&mut routes![
|
||||
v1_terminal_proxy,
|
||||
v1_lnurlp,
|
||||
v1_renew_vm_lnurlp,
|
||||
v1_get_payment_invoice
|
||||
]);
|
||||
|
||||
routes
|
||||
}
|
||||
|
||||
type ApiResult<T> = Result<Json<ApiData<T>>, ApiError>;
|
||||
pub type ApiResult<T> = Result<Json<ApiData<T>>, ApiError>;
|
||||
|
||||
#[derive(Serialize, Deserialize, JsonSchema)]
|
||||
struct ApiData<T: Serialize> {
|
||||
pub struct ApiData<T: Serialize> {
|
||||
pub data: T,
|
||||
}
|
||||
|
||||
@ -70,7 +99,7 @@ impl<T: Serialize> ApiData<T> {
|
||||
|
||||
#[derive(Serialize, Deserialize, JsonSchema, Responder)]
|
||||
#[response(status = 500)]
|
||||
struct ApiError {
|
||||
pub struct ApiError {
|
||||
pub error: String,
|
||||
}
|
||||
|
||||
@ -103,6 +132,18 @@ async fn v1_patch_account(
|
||||
user.email = req.email.clone();
|
||||
user.contact_nip17 = req.contact_nip17;
|
||||
user.contact_email = req.contact_email;
|
||||
user.country_code = req
|
||||
.country_code
|
||||
.as_ref()
|
||||
.and_then(|c| CountryCode::for_alpha3(c).ok())
|
||||
.map(|c| c.alpha3().to_string());
|
||||
user.billing_name = req.name.clone();
|
||||
user.billing_address_1 = req.address_1.clone();
|
||||
user.billing_address_2 = req.address_2.clone();
|
||||
user.billing_city = req.city.clone();
|
||||
user.billing_state = req.state.clone();
|
||||
user.billing_postcode = req.postcode.clone();
|
||||
user.billing_tax_id = req.tax_id.clone();
|
||||
|
||||
db.update_user(&user).await?;
|
||||
ApiData::ok(())
|
||||
@ -119,11 +160,7 @@ async fn v1_get_account(
|
||||
let uid = db.upsert_user(&pubkey).await?;
|
||||
let user = db.get_user(uid).await?;
|
||||
|
||||
ApiData::ok(AccountPatchRequest {
|
||||
email: user.email,
|
||||
contact_nip17: user.contact_nip17,
|
||||
contact_email: user.contact_email,
|
||||
})
|
||||
ApiData::ok(user.into())
|
||||
}
|
||||
|
||||
async fn vm_to_status(
|
||||
@ -143,7 +180,7 @@ async fn vm_to_status(
|
||||
.map(|i| (i.id, i))
|
||||
.collect();
|
||||
|
||||
let template = ApiVmTemplate::from_vm(&db, &vm).await?;
|
||||
let template = ApiVmTemplate::from_vm(db, &vm).await?;
|
||||
Ok(ApiVmStatus {
|
||||
id: vm.id,
|
||||
created: vm.created,
|
||||
@ -309,7 +346,7 @@ async fn v1_list_vm_templates(
|
||||
})
|
||||
.collect();
|
||||
let custom_templates: Vec<VmCustomPricing> =
|
||||
join_all(regions.iter().map(|(k, _)| db.list_custom_pricing(*k)))
|
||||
join_all(regions.keys().map(|k| db.list_custom_pricing(*k)))
|
||||
.await
|
||||
.into_iter()
|
||||
.filter_map(|r| r.ok())
|
||||
@ -336,25 +373,32 @@ async fn v1_list_vm_templates(
|
||||
let max_memory = templates.iter().map(|t| t.memory).max().unwrap_or(GB * 2);
|
||||
let max_disk = templates
|
||||
.iter()
|
||||
.map(|t| t.disk_size)
|
||||
.max()
|
||||
.unwrap_or(GB * 5);
|
||||
.map(|t| (t.disk_type, t.disk_interface, t.disk_size))
|
||||
.fold(HashMap::new(), |mut acc, v| {
|
||||
let k = (v.0.into(), v.1.into());
|
||||
if let Some(mut x) = acc.get_mut(&k) {
|
||||
if *x < v.2 {
|
||||
*x = v.2;
|
||||
}
|
||||
} else {
|
||||
acc.insert(k, v.2);
|
||||
}
|
||||
return acc;
|
||||
});
|
||||
Some(
|
||||
custom_templates
|
||||
.into_iter()
|
||||
.filter_map(|t| {
|
||||
let region = regions.get(&t.region_id)?;
|
||||
Some(
|
||||
ApiCustomTemplateParams::from(
|
||||
&t,
|
||||
&custom_template_disks,
|
||||
region,
|
||||
max_cpu,
|
||||
max_memory,
|
||||
max_disk,
|
||||
)
|
||||
.ok()?,
|
||||
ApiCustomTemplateParams::from(
|
||||
&t,
|
||||
&custom_template_disks,
|
||||
region,
|
||||
max_cpu,
|
||||
max_memory,
|
||||
&max_disk,
|
||||
)
|
||||
.ok()
|
||||
})
|
||||
.collect(),
|
||||
)
|
||||
@ -376,7 +420,7 @@ async fn v1_custom_template_calc(
|
||||
|
||||
let price = PricingEngine::get_custom_vm_cost_amount(db, 0, &template).await?;
|
||||
ApiData::ok(ApiPrice {
|
||||
currency: price.currency.clone(),
|
||||
currency: price.currency,
|
||||
amount: price.total(),
|
||||
})
|
||||
}
|
||||
@ -484,12 +528,13 @@ async fn v1_create_vm_order(
|
||||
|
||||
/// Renew(Extend) a VM
|
||||
#[openapi(tag = "VM")]
|
||||
#[get("/api/v1/vm/<id>/renew")]
|
||||
#[get("/api/v1/vm/<id>/renew?<method>")]
|
||||
async fn v1_renew_vm(
|
||||
auth: Nip98Auth,
|
||||
db: &State<Arc<dyn LNVpsDb>>,
|
||||
provisioner: &State<Arc<LNVpsProvisioner>>,
|
||||
id: u64,
|
||||
method: Option<&str>,
|
||||
) -> ApiResult<ApiVmPayment> {
|
||||
let pubkey = auth.event.pubkey.to_bytes();
|
||||
let uid = db.upsert_user(&pubkey).await?;
|
||||
@ -498,10 +543,76 @@ async fn v1_renew_vm(
|
||||
return ApiData::err("VM does not belong to you");
|
||||
}
|
||||
|
||||
let rsp = provisioner.renew(id).await?;
|
||||
let rsp = provisioner
|
||||
.renew(
|
||||
id,
|
||||
method
|
||||
.and_then(|m| PaymentMethod::from_str(m).ok())
|
||||
.unwrap_or(PaymentMethod::Lightning),
|
||||
)
|
||||
.await?;
|
||||
ApiData::ok(rsp.into())
|
||||
}
|
||||
|
||||
/// Extend a VM by LNURL payment
|
||||
#[get("/api/v1/vm/<id>/renew-lnurlp?<amount>")]
|
||||
async fn v1_renew_vm_lnurlp(
|
||||
db: &State<Arc<dyn LNVpsDb>>,
|
||||
provisioner: &State<Arc<LNVpsProvisioner>>,
|
||||
id: u64,
|
||||
amount: u64,
|
||||
) -> Result<Json<LnURLPayInvoice>, &'static str> {
|
||||
let vm = db.get_vm(id).await.map_err(|_e| "VM not found")?;
|
||||
if vm.deleted {
|
||||
return Err("VM not found");
|
||||
}
|
||||
if amount < 1000 {
|
||||
return Err("Amount must be greater than 1000");
|
||||
}
|
||||
|
||||
let rsp = provisioner
|
||||
.renew_amount(
|
||||
id,
|
||||
CurrencyAmount::millisats(amount),
|
||||
PaymentMethod::Lightning,
|
||||
)
|
||||
.await
|
||||
.map_err(|_| "Error generating invoice")?;
|
||||
|
||||
// external_data holds the bolt11 invoice (pr) for the lightning payment method
|
||||
Ok(Json(LnURLPayInvoice::new(rsp.external_data)))
|
||||
}
|
||||
|
||||
/// LNURL ad-hoc extend vm
|
||||
#[get("/.well-known/lnurlp/<id>")]
|
||||
async fn v1_lnurlp(
|
||||
db: &State<Arc<dyn LNVpsDb>>,
|
||||
settings: &State<Settings>,
|
||||
id: u64,
|
||||
) -> Result<Json<PayResponse>, &'static str> {
|
||||
let vm = db.get_vm(id).await.map_err(|_e| "VM not found")?;
|
||||
if vm.deleted {
|
||||
return Err("VM not found");
|
||||
}
|
||||
|
||||
let meta = vec![vec!["text/plain".to_string(), format!("Extend VM {}", id)]];
|
||||
let rsp = PayResponse {
|
||||
callback: Url::parse(&settings.public_url)
|
||||
.map_err(|_| "Invalid public url")?
|
||||
.join(&format!("/api/v1/vm/{}/renew-lnurlp", id))
|
||||
.map_err(|_| "Could not get callback url")?
|
||||
.to_string(),
|
||||
max_sendable: 1_000_000_000,
|
||||
min_sendable: 1_000, // TODO: calc min by using 1s extend time
|
||||
tag: Tag::PayRequest,
|
||||
metadata: serde_json::to_string(&meta).map_err(|_e| "Failed to serialize metadata")?,
|
||||
comment_allowed: None,
|
||||
allows_nostr: None,
|
||||
nostr_pubkey: None,
|
||||
};
|
||||
Ok(Json(rsp))
|
||||
}
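A minimal client-side sketch of this LNURL-pay flow (not part of the diff): it assumes reqwest with the `json` feature plus serde_json, and that the `callback`/`pr` field names follow the LUD-06 responses built by `PayResponse` and `LnURLPayInvoice` above.

// Hypothetical client helper, shown only to illustrate the two-step LNURL flow added here.
async fn lnurl_extend_vm(base: &str, vm_id: u64, amount_msat: u64) -> anyhow::Result<String> {
    // 1. fetch the pay-request descriptor for the VM
    let pay: serde_json::Value = reqwest::get(format!("{}/.well-known/lnurlp/{}", base, vm_id))
        .await?
        .json()
        .await?;
    let callback = pay["callback"].as_str().unwrap_or_default();
    // 2. request an invoice for the chosen amount (millisats, min 1000 per the handler above)
    let inv: serde_json::Value = reqwest::get(format!("{}?amount={}", callback, amount_msat))
        .await?
        .json()
        .await?;
    // 3. the returned bolt11 invoice is then paid by the user's wallet
    Ok(inv["pr"].as_str().unwrap_or_default().to_string())
}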
|
||||
|
||||
/// Start a VM
|
||||
#[openapi(tag = "VM")]
|
||||
#[patch("/api/v1/vm/<id>/start")]
|
||||
@ -576,6 +687,32 @@ async fn v1_restart_vm(
|
||||
ApiData::ok(())
|
||||
}
|
||||
|
||||
/// Re-install a VM
|
||||
#[openapi(tag = "VM")]
|
||||
#[patch("/api/v1/vm/<id>/re-install")]
|
||||
async fn v1_reinstall_vm(
|
||||
auth: Nip98Auth,
|
||||
db: &State<Arc<dyn LNVpsDb>>,
|
||||
settings: &State<Settings>,
|
||||
worker: &State<UnboundedSender<WorkJob>>,
|
||||
id: u64,
|
||||
) -> ApiResult<()> {
|
||||
let pubkey = auth.event.pubkey.to_bytes();
|
||||
let uid = db.upsert_user(&pubkey).await?;
|
||||
let vm = db.get_vm(id).await?;
|
||||
if uid != vm.user_id {
|
||||
return ApiData::err("VM does not belong to you");
|
||||
}
|
||||
|
||||
let host = db.get_host(vm.host_id).await?;
|
||||
let client = get_host_client(&host, &settings.provisioner)?;
|
||||
let info = FullVmInfo::load(vm.id, (*db).clone()).await?;
|
||||
client.reinstall_vm(&info).await?;
|
||||
|
||||
worker.send(WorkJob::CheckVm { vm_id: id })?;
|
||||
ApiData::ok(())
|
||||
}
|
||||
|
||||
#[openapi(tag = "VM")]
|
||||
#[get("/api/v1/vm/<id>/time-series")]
|
||||
async fn v1_time_series(
|
||||
@ -596,6 +733,134 @@ async fn v1_time_series(
|
||||
ApiData::ok(client.get_time_series_data(&vm, TimeSeries::Hourly).await?)
|
||||
}
|
||||
|
||||
#[get("/api/v1/vm/<id>/console?<auth>")]
|
||||
async fn v1_terminal_proxy(
|
||||
auth: &str,
|
||||
db: &State<Arc<dyn LNVpsDb>>,
|
||||
settings: &State<Settings>,
|
||||
id: u64,
|
||||
ws: ws::WebSocket,
|
||||
) -> Result<ws::Channel<'static>, &'static str> {
|
||||
return Err("Disabled");
|
||||
let auth = Nip98Auth::from_base64(auth).map_err(|e| "Missing or invalid auth param")?;
|
||||
if auth
|
||||
.check(&format!("/api/v1/vm/{id}/console"), "GET")
|
||||
.is_err()
|
||||
{
|
||||
return Err("Invalid auth event");
|
||||
}
|
||||
let pubkey = auth.event.pubkey.to_bytes();
|
||||
let uid = db.upsert_user(&pubkey).await.map_err(|_| "Insert failed")?;
|
||||
let vm = db.get_vm(id).await.map_err(|_| "VM not found")?;
|
||||
if uid != vm.user_id {
|
||||
return Err("VM does not belong to you");
|
||||
}
|
||||
|
||||
let host = db
|
||||
.get_host(vm.host_id)
|
||||
.await
|
||||
.map_err(|_| "VM host not found")?;
|
||||
let client =
|
||||
get_host_client(&host, &settings.provisioner).map_err(|_| "Failed to get host client")?;
|
||||
|
||||
let mut ws_upstream = client.connect_terminal(&vm).await.map_err(|e| {
|
||||
error!("Failed to start terminal proxy: {}", e);
|
||||
"Failed to open terminal proxy"
|
||||
})?;
|
||||
let ws = ws.config(Default::default());
|
||||
Ok(ws.channel(move |mut stream| {
|
||||
use ws::*;
|
||||
|
||||
Box::pin(async move {
|
||||
async fn process_client<E>(
|
||||
msg: Result<Message, E>,
|
||||
ws_upstream: &mut Sender<Vec<u8>>,
|
||||
) -> Result<()>
|
||||
where
|
||||
E: Display,
|
||||
{
|
||||
match msg {
|
||||
Ok(m) => {
|
||||
let m_up = match m {
|
||||
Message::Text(t) => t.as_bytes().to_vec(),
|
||||
_ => panic!("todo"),
|
||||
};
|
||||
if let Err(e) = ws_upstream.send(m_up).await {
|
||||
bail!("Failed to send msg to upstream: {}", e);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
bail!("Failed to read from client: {}", e);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn process_upstream<E>(
|
||||
msg: Result<Vec<u8>, E>,
|
||||
tx_client: &mut stream::DuplexStream,
|
||||
) -> Result<()>
|
||||
where
|
||||
E: Display,
|
||||
{
|
||||
match msg {
|
||||
Ok(m) => {
|
||||
let down = String::from_utf8_lossy(&m).into_owned();
|
||||
let m_down = Message::Text(down);
|
||||
if let Err(e) = tx_client.send(m_down).await {
|
||||
bail!("Failed to msg to client: {}", e);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
bail!("Failed to read from upstream: {}", e);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
loop {
|
||||
tokio::select! {
|
||||
Some(msg) = stream.next() => {
|
||||
if let Err(e) = process_client(msg, &mut ws_upstream.tx).await {
|
||||
error!("{}", e);
|
||||
break;
|
||||
}
|
||||
},
|
||||
Some(r) = ws_upstream.rx.recv() => {
|
||||
let msg: Result<Vec<u8>, anyhow::Error> = Ok(r);
|
||||
if let Err(e) = process_upstream(msg, &mut stream).await {
|
||||
error!("{}", e);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
info!("Websocket closed");
|
||||
Ok(())
|
||||
})
|
||||
}))
|
||||
}
|
||||
|
||||
#[openapi(tag = "Payment")]
|
||||
#[get("/api/v1/payment/methods")]
|
||||
async fn v1_get_payment_methods(settings: &State<Settings>) -> ApiResult<Vec<ApiPaymentInfo>> {
|
||||
let mut ret = vec![ApiPaymentInfo {
|
||||
name: ApiPaymentMethod::Lightning,
|
||||
metadata: HashMap::new(),
|
||||
currencies: vec![Currency::BTC],
|
||||
}];
|
||||
#[cfg(feature = "revolut")]
|
||||
if let Some(r) = &settings.revolut {
|
||||
ret.push(ApiPaymentInfo {
|
||||
name: ApiPaymentMethod::Revolut,
|
||||
metadata: HashMap::from([("pubkey".to_string(), r.public_key.to_string())]),
|
||||
currencies: vec![Currency::EUR, Currency::USD],
|
||||
})
|
||||
}
|
||||
|
||||
ApiData::ok(ret)
|
||||
}
|
||||
|
||||
/// Get payment status (for polling)
|
||||
#[openapi(tag = "Payment")]
|
||||
#[get("/api/v1/payment/<id>")]
|
||||
@ -620,3 +885,115 @@ async fn v1_get_payment(
|
||||
|
||||
ApiData::ok(payment.into())
|
||||
}
|
||||
|
||||
/// Print payment invoice
|
||||
#[get("/api/v1/payment/<id>/invoice?<auth>")]
|
||||
async fn v1_get_payment_invoice(
|
||||
db: &State<Arc<dyn LNVpsDb>>,
|
||||
id: &str,
|
||||
auth: &str,
|
||||
) -> Result<(ContentType, Vec<u8>), &'static str> {
|
||||
let auth = Nip98Auth::from_base64(auth).map_err(|e| "Missing or invalid auth param")?;
|
||||
if auth
|
||||
.check(&format!("/api/v1/payment/{id}/invoice"), "GET")
|
||||
.is_err()
|
||||
{
|
||||
return Err("Invalid auth event");
|
||||
}
|
||||
let pubkey = auth.event.pubkey.to_bytes();
|
||||
let uid = db.upsert_user(&pubkey).await.map_err(|_| "Insert failed")?;
|
||||
let id = if let Ok(i) = hex::decode(id) {
|
||||
i
|
||||
} else {
|
||||
return Err("Invalid payment id");
|
||||
};
|
||||
|
||||
let payment = db
|
||||
.get_vm_payment(&id)
|
||||
.await
|
||||
.map_err(|_| "Payment not found")?;
|
||||
let vm = db.get_vm(payment.vm_id).await.map_err(|_| "VM not found")?;
|
||||
if vm.user_id != uid {
|
||||
return Err("VM does not belong to you");
|
||||
}
|
||||
|
||||
if !payment.is_paid {
|
||||
return Err("Payment is not paid, can't generate invoice");
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
struct PaymentInfo {
|
||||
year: i32,
|
||||
current_date: DateTime<Utc>,
|
||||
vm: ApiVmStatus,
|
||||
payment: ApiVmPayment,
|
||||
user: AccountPatchRequest,
|
||||
npub: String,
|
||||
total: u64,
|
||||
company: Option<ApiCompany>,
|
||||
}
|
||||
|
||||
let host = db
|
||||
.get_host(vm.host_id)
|
||||
.await
|
||||
.map_err(|_| "Host not found")?;
|
||||
let region = db
|
||||
.get_host_region(host.region_id)
|
||||
.await
|
||||
.map_err(|_| "Region not found")?;
|
||||
let company = if let Some(c) = region.company_id {
|
||||
Some(db.get_company(c).await.map_err(|_| "Company not found")?)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let user = db.get_user(uid).await.map_err(|_| "User not found")?;
|
||||
#[cfg(debug_assertions)]
|
||||
let template =
|
||||
mustache::compile_path("lnvps_api/invoice.html").map_err(|_| "Invalid template")?;
|
||||
#[cfg(not(debug_assertions))]
|
||||
let template = mustache::compile_str(include_str!("../../invoice.html"))
|
||||
.map_err(|_| "Invalid template")?;
|
||||
|
||||
let now = Utc::now();
|
||||
let mut html = Cursor::new(Vec::new());
|
||||
template
|
||||
.render(
|
||||
&mut html,
|
||||
&PaymentInfo {
|
||||
year: now.year(),
|
||||
current_date: now,
|
||||
vm: vm_to_status(db, vm, None)
|
||||
.await
|
||||
.map_err(|_| "Failed to get VM state")?,
|
||||
total: payment.amount + payment.tax,
|
||||
payment: payment.into(),
|
||||
npub: nostr::PublicKey::from_slice(&user.pubkey)
|
||||
.map_err(|_| "Invalid pubkey")?
|
||||
.to_bech32()
|
||||
.unwrap(),
|
||||
user: user.into(),
|
||||
company: company.map(|c| c.into()),
|
||||
},
|
||||
)
|
||||
.map_err(|_| "Failed to generate invoice")?;
|
||||
Ok((ContentType::HTML, html.into_inner()))
|
||||
}
|
||||
|
||||
/// List payment history of a VM
|
||||
#[openapi(tag = "VM")]
|
||||
#[get("/api/v1/vm/<id>/payments")]
|
||||
async fn v1_payment_history(
|
||||
auth: Nip98Auth,
|
||||
db: &State<Arc<dyn LNVpsDb>>,
|
||||
id: u64,
|
||||
) -> ApiResult<Vec<ApiVmPayment>> {
|
||||
let pubkey = auth.event.pubkey.to_bytes();
|
||||
let uid = db.upsert_user(&pubkey).await?;
|
||||
let vm = db.get_vm(id).await?;
|
||||
if vm.user_id != uid {
|
||||
return ApiData::err("VM does not belong to you");
|
||||
}
|
||||
|
||||
let payments = db.list_vm_payment(id).await?;
|
||||
ApiData::ok(payments.into_iter().map(|i| i.into()).collect())
|
||||
}
|
@ -6,25 +6,38 @@ use std::collections::HashMap;
|
||||
use std::sync::LazyLock;
|
||||
use tokio::sync::broadcast;
|
||||
|
||||
/// Messaging bridge for webhooks to other parts of the system (bitvora)
|
||||
/// Messaging bridge for webhooks to other parts of the system (bitvora/revolut)
|
||||
pub static WEBHOOK_BRIDGE: LazyLock<WebhookBridge> = LazyLock::new(WebhookBridge::new);
|
||||
|
||||
pub fn routes() -> Vec<Route> {
|
||||
if cfg!(feature = "bitvora") {
|
||||
routes![bitvora_webhook]
|
||||
} else {
|
||||
routes![]
|
||||
}
|
||||
let mut routes = vec![];
|
||||
|
||||
#[cfg(feature = "bitvora")]
|
||||
routes.append(&mut routes![bitvora_webhook]);
|
||||
|
||||
#[cfg(feature = "revolut")]
|
||||
routes.append(&mut routes![revolut_webhook]);
|
||||
|
||||
routes
|
||||
}
|
||||
|
||||
#[cfg(feature = "bitvora")]
|
||||
#[post("/api/v1/webhook/bitvora", data = "<req>")]
|
||||
async fn bitvora_webhook(req: WebhookMessage) -> Status {
|
||||
WEBHOOK_BRIDGE.send(req);
|
||||
Status::Ok
|
||||
}
|
||||
|
||||
#[cfg(feature = "revolut")]
|
||||
#[post("/api/v1/webhook/revolut", data = "<req>")]
|
||||
async fn revolut_webhook(req: WebhookMessage) -> Status {
|
||||
WEBHOOK_BRIDGE.send(req);
|
||||
Status::Ok
|
||||
}
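A consumer-side sketch of the bridge (not part of the diff): the `subscribe()` method is an assumption based on the `tokio::sync::broadcast` import above and is not shown in this hunk.

// Hypothetical consumer: assumes WebhookBridge exposes subscribe() returning a
// tokio broadcast::Receiver<WebhookMessage>; only send() appears in this hunk.
async fn watch_revolut_webhooks() {
    let mut rx = WEBHOOK_BRIDGE.subscribe();
    while let Ok(msg) = rx.recv().await {
        if msg.endpoint == "/api/v1/webhook/revolut" {
            // hand the raw body + headers to the revolut payment handler for verification
            log::info!("revolut webhook: {} bytes", msg.body.len());
        }
    }
}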
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct WebhookMessage {
|
||||
pub endpoint: String,
|
||||
pub body: Vec<u8>,
|
||||
pub headers: HashMap<String, String>,
|
||||
}
|
||||
@ -48,6 +61,7 @@ impl<'r> FromData<'r> for WebhookMessage {
|
||||
return rocket::data::Outcome::Error((Status::BadRequest, ()));
|
||||
};
|
||||
let msg = WebhookMessage {
|
||||
endpoint: req.uri().path().to_string(),
|
||||
headers: header,
|
||||
body: body.value.to_vec(),
|
||||
};
|
@ -1,25 +1,26 @@
|
||||
use anyhow::Error;
|
||||
use chrono::Utc;
|
||||
use clap::Parser;
|
||||
use config::{Config, File};
|
||||
use lnvps::api;
|
||||
use lnvps::cors::CORS;
|
||||
use lnvps::exchange::{DefaultRateCache, ExchangeRateService};
|
||||
use lnvps::invoice::InvoiceHandler;
|
||||
use lnvps::lightning::get_node;
|
||||
use lnvps::settings::Settings;
|
||||
use lnvps::status::VmStateCache;
|
||||
use lnvps::worker::{WorkJob, Worker};
|
||||
use lnvps_api::api;
|
||||
use lnvps_api::data_migration::run_data_migrations;
|
||||
use lnvps_api::dvm::start_dvms;
|
||||
use lnvps_api::exchange::{DefaultRateCache, ExchangeRateService};
|
||||
use lnvps_api::lightning::get_node;
|
||||
use lnvps_api::payments::listen_all_payments;
|
||||
use lnvps_api::settings::Settings;
|
||||
use lnvps_api::status::VmStateCache;
|
||||
use lnvps_api::worker::{WorkJob, Worker};
|
||||
use lnvps_common::CORS;
|
||||
use lnvps_db::{LNVpsDb, LNVpsDbMysql};
|
||||
use log::{error, LevelFilter};
|
||||
use log::error;
|
||||
use nostr::Keys;
|
||||
use nostr_sdk::Client;
|
||||
use rocket::http::Method;
|
||||
use rocket_okapi::swagger_ui::{make_swagger_ui, SwaggerUIConfig};
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use tokio::time::sleep;
|
||||
|
||||
#[derive(Parser)]
|
||||
#[clap(about, version, author)]
|
||||
@ -35,37 +36,9 @@ struct Args {
|
||||
|
||||
#[rocket::main]
|
||||
async fn main() -> Result<(), Error> {
|
||||
let log_level = std::env::var("RUST_LOG")
|
||||
.unwrap_or_else(|_| "info".to_string()) // Default to "info" if not set
|
||||
.to_lowercase();
|
||||
|
||||
let max_level = match log_level.as_str() {
|
||||
"trace" => LevelFilter::Trace,
|
||||
"debug" => LevelFilter::Debug,
|
||||
"info" => LevelFilter::Info,
|
||||
"warn" => LevelFilter::Warn,
|
||||
"error" => LevelFilter::Error,
|
||||
"off" => LevelFilter::Off,
|
||||
_ => LevelFilter::Info,
|
||||
};
|
||||
env_logger::init();
|
||||
|
||||
let args = Args::parse();
|
||||
fern::Dispatch::new()
|
||||
.level(max_level)
|
||||
.level_for("rocket", LevelFilter::Error)
|
||||
.chain(fern::log_file(
|
||||
args.log.unwrap_or(PathBuf::from(".")).join("main.log"),
|
||||
)?)
|
||||
.chain(std::io::stdout())
|
||||
.format(|out, message, record| {
|
||||
out.finish(format_args!(
|
||||
"[{}] [{}] {}",
|
||||
Utc::now().format("%Y-%m-%d %H:%M:%S"),
|
||||
record.level(),
|
||||
message
|
||||
))
|
||||
})
|
||||
.apply()?;
|
||||
|
||||
let settings: Settings = Config::builder()
|
||||
.add_source(File::from(
|
||||
@ -102,6 +75,9 @@ async fn main() -> Result<(), Error> {
|
||||
let provisioner = settings.get_provisioner(db.clone(), node.clone(), exchange.clone());
|
||||
provisioner.init().await?;
|
||||
|
||||
// run data migrations
|
||||
run_data_migrations(db.clone(), provisioner.clone(), &settings).await?;
|
||||
|
||||
let mut worker = Worker::new(
|
||||
db.clone(),
|
||||
provisioner.clone(),
|
||||
@ -117,15 +93,10 @@ async fn main() -> Result<(), Error> {
|
||||
}
|
||||
}
|
||||
});
|
||||
let mut handler = InvoiceHandler::new(node.clone(), db.clone(), sender.clone());
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
if let Err(e) = handler.listen().await {
|
||||
error!("invoice-error: {}", e);
|
||||
}
|
||||
sleep(Duration::from_secs(5)).await;
|
||||
}
|
||||
});
|
||||
|
||||
// setup payment handlers
|
||||
listen_all_payments(&settings, node.clone(), db.clone(), sender.clone())?;
|
||||
|
||||
// request work every 30s to check vm status
|
||||
let sender_clone = sender.clone();
|
||||
tokio::spawn(async move {
|
||||
@ -152,6 +123,15 @@ async fn main() -> Result<(), Error> {
|
||||
}
|
||||
});
|
||||
|
||||
#[cfg(feature = "nostr-dvm")]
|
||||
{
|
||||
let nostr_client = nostr_client.unwrap();
|
||||
start_dvms(nostr_client.clone(), provisioner.clone());
|
||||
}
|
||||
|
||||
// request for host info to be patched
|
||||
sender.send(WorkJob::PatchHosts)?;
|
||||
|
||||
let mut config = rocket::Config::default();
|
||||
let ip: SocketAddr = match &settings.listen {
|
||||
Some(i) => i.parse()?,
|
||||
@ -161,7 +141,6 @@ async fn main() -> Result<(), Error> {
|
||||
config.port = ip.port();
|
||||
|
||||
if let Err(e) = rocket::Rocket::custom(config)
|
||||
.attach(CORS)
|
||||
.manage(db.clone())
|
||||
.manage(provisioner.clone())
|
||||
.manage(status.clone())
|
||||
@ -176,10 +155,20 @@ async fn main() -> Result<(), Error> {
|
||||
..Default::default()
|
||||
}),
|
||||
)
|
||||
.attach(CORS)
|
||||
.mount(
|
||||
"/",
|
||||
vec![rocket::Route::ranked(
|
||||
isize::MAX,
|
||||
Method::Options,
|
||||
"/<catch_all_options_route..>",
|
||||
CORS,
|
||||
)],
|
||||
)
|
||||
.launch()
|
||||
.await
|
||||
{
|
||||
error!("{}", e);
|
||||
error!("{:?}", e);
|
||||
}
|
||||
|
||||
Ok(())
|
66
lnvps_api/src/data_migration/dns.rs
Normal file
@ -0,0 +1,66 @@
|
||||
use crate::data_migration::DataMigration;
|
||||
use crate::dns::{BasicRecord, DnsServer};
|
||||
use crate::settings::Settings;
|
||||
use anyhow::Result;
|
||||
use lnvps_db::LNVpsDb;
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
|
||||
pub struct DnsDataMigration {
|
||||
db: Arc<dyn LNVpsDb>,
|
||||
dns: Arc<dyn DnsServer>,
|
||||
forward_zone_id: Option<String>,
|
||||
}
|
||||
|
||||
impl DnsDataMigration {
|
||||
pub fn new(db: Arc<dyn LNVpsDb>, settings: &Settings) -> Option<Self> {
|
||||
let dns = settings.get_dns().ok().flatten()?;
|
||||
Some(Self {
|
||||
db,
|
||||
dns,
|
||||
forward_zone_id: settings.dns.as_ref().map(|z| z.forward_zone_id.to_string()),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl DataMigration for DnsDataMigration {
|
||||
fn migrate(&self) -> Pin<Box<dyn Future<Output = Result<()>> + Send>> {
|
||||
let db = self.db.clone();
|
||||
let dns = self.dns.clone();
|
||||
let forward_zone_id = self.forward_zone_id.clone();
|
||||
Box::pin(async move {
|
||||
let zone_id = if let Some(z) = forward_zone_id {
|
||||
z
|
||||
} else {
|
||||
return Ok(());
|
||||
};
|
||||
let vms = db.list_vms().await?;
|
||||
|
||||
for vm in vms {
|
||||
let mut ips = db.list_vm_ip_assignments(vm.id).await?;
|
||||
for ip in &mut ips {
|
||||
let mut did_change = false;
|
||||
if ip.dns_forward.is_none() {
|
||||
let rec = BasicRecord::forward(ip)?;
|
||||
let r = dns.add_record(&zone_id, &rec).await?;
|
||||
ip.dns_forward = Some(r.name);
|
||||
ip.dns_forward_ref = r.id;
|
||||
did_change = true;
|
||||
}
|
||||
if ip.dns_reverse.is_none() {
|
||||
let rec = BasicRecord::reverse_to_fwd(ip)?;
|
||||
let r = dns.add_record(&zone_id, &rec).await?;
|
||||
ip.dns_reverse = Some(r.value);
|
||||
ip.dns_reverse_ref = r.id;
|
||||
did_change = true;
|
||||
}
|
||||
if did_change {
|
||||
db.update_vm_ip_assignment(ip).await?;
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
}
|
52
lnvps_api/src/data_migration/ip6_init.rs
Normal file
@ -0,0 +1,52 @@
|
||||
use crate::data_migration::DataMigration;
|
||||
use crate::provisioner::{LNVpsProvisioner, NetworkProvisioner};
|
||||
use chrono::Utc;
|
||||
use ipnetwork::IpNetwork;
|
||||
use lnvps_db::LNVpsDb;
|
||||
use log::info;
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
|
||||
pub struct Ip6InitDataMigration {
|
||||
db: Arc<dyn LNVpsDb>,
|
||||
provisioner: Arc<LNVpsProvisioner>,
|
||||
}
|
||||
|
||||
impl Ip6InitDataMigration {
|
||||
pub fn new(db: Arc<dyn LNVpsDb>, provisioner: Arc<LNVpsProvisioner>) -> Ip6InitDataMigration {
|
||||
Self { db, provisioner }
|
||||
}
|
||||
}
|
||||
|
||||
impl DataMigration for Ip6InitDataMigration {
|
||||
fn migrate(&self) -> Pin<Box<dyn Future<Output = anyhow::Result<()>> + Send>> {
|
||||
let db = self.db.clone();
|
||||
let provisioner = self.provisioner.clone();
|
||||
Box::pin(async move {
|
||||
let net = NetworkProvisioner::new(db.clone());
|
||||
let vms = db.list_vms().await?;
|
||||
for vm in vms {
|
||||
if vm.expires < Utc::now() {
|
||||
continue;
|
||||
}
|
||||
let host = db.get_host(vm.host_id).await?;
|
||||
let ips = db.list_vm_ip_assignments(vm.id).await?;
|
||||
// if no ipv6 address has been assigned yet, pick one
|
||||
if ips.iter().all(|i| {
|
||||
IpNetwork::from_str(&i.ip)
|
||||
.map(|i| i.is_ipv4())
|
||||
.unwrap_or(false)
|
||||
}) {
|
||||
let ips_pick = net.pick_ip_for_region(host.region_id).await?;
|
||||
if let Some(mut v6) = ips_pick.ip6 {
|
||||
info!("Assigning ip {} to vm {}", v6.ip, vm.id);
|
||||
provisioner.assign_available_v6_to_vm(&vm, &mut v6).await?;
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
}
|
43
lnvps_api/src/data_migration/mod.rs
Normal file
@ -0,0 +1,43 @@
|
||||
use crate::data_migration::dns::DnsDataMigration;
|
||||
use crate::data_migration::ip6_init::Ip6InitDataMigration;
|
||||
use crate::provisioner::LNVpsProvisioner;
|
||||
use crate::settings::Settings;
|
||||
use anyhow::Result;
|
||||
use lnvps_db::LNVpsDb;
|
||||
use log::{error, info};
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
|
||||
mod dns;
|
||||
mod ip6_init;
|
||||
|
||||
/// Basic data migration to run at startup
|
||||
pub trait DataMigration: Send + Sync {
|
||||
fn migrate(&self) -> Pin<Box<dyn Future<Output = Result<()>> + Send>>;
|
||||
}
|
||||
|
||||
pub async fn run_data_migrations(
|
||||
db: Arc<dyn LNVpsDb>,
|
||||
lnvps: Arc<LNVpsProvisioner>,
|
||||
settings: &Settings,
|
||||
) -> Result<()> {
|
||||
let mut migrations: Vec<Box<dyn DataMigration>> = vec![];
|
||||
migrations.push(Box::new(Ip6InitDataMigration::new(
|
||||
db.clone(),
|
||||
lnvps.clone(),
|
||||
)));
|
||||
|
||||
if let Some(d) = DnsDataMigration::new(db.clone(), settings) {
|
||||
migrations.push(Box::new(d));
|
||||
}
|
||||
|
||||
info!("Running {} data migrations", migrations.len());
|
||||
for migration in migrations {
|
||||
if let Err(e) = migration.migrate().await {
|
||||
error!("Error running data migration: {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
@ -1,4 +1,4 @@
|
||||
use crate::dns::{BasicRecord, DnsServer, RecordType};
|
||||
use crate::dns::{BasicRecord, DnsServer};
|
||||
use crate::json_api::JsonApi;
|
||||
use anyhow::Context;
|
||||
use lnvps_db::async_trait;
|
||||
@ -7,17 +7,17 @@ use serde::{Deserialize, Serialize};
|
||||
|
||||
pub struct Cloudflare {
|
||||
api: JsonApi,
|
||||
reverse_zone_id: String,
|
||||
forward_zone_id: String,
|
||||
}
|
||||
|
||||
impl Cloudflare {
|
||||
pub fn new(token: &str, reverse_zone_id: &str, forward_zone_id: &str) -> Cloudflare {
|
||||
pub fn new(token: &str) -> Cloudflare {
|
||||
Self {
|
||||
api: JsonApi::token("https://api.cloudflare.com", &format!("Bearer {}", token))
|
||||
.unwrap(),
|
||||
reverse_zone_id: reverse_zone_id.to_owned(),
|
||||
forward_zone_id: forward_zone_id.to_owned(),
|
||||
api: JsonApi::token(
|
||||
"https://api.cloudflare.com",
|
||||
&format!("Bearer {}", token),
|
||||
false,
|
||||
)
|
||||
.unwrap(),
|
||||
}
|
||||
}
|
||||
|
||||
@ -41,11 +41,7 @@ impl Cloudflare {
|
||||
|
||||
#[async_trait]
|
||||
impl DnsServer for Cloudflare {
|
||||
async fn add_record(&self, record: &BasicRecord) -> anyhow::Result<BasicRecord> {
|
||||
let zone_id = match &record.kind {
|
||||
RecordType::PTR => &self.reverse_zone_id,
|
||||
_ => &self.forward_zone_id,
|
||||
};
|
||||
async fn add_record(&self, zone_id: &str, record: &BasicRecord) -> anyhow::Result<BasicRecord> {
|
||||
info!(
|
||||
"Adding record: [{}] {} => {}",
|
||||
record.kind, record.name, record.value
|
||||
@ -71,11 +67,7 @@ impl DnsServer for Cloudflare {
|
||||
})
|
||||
}
|
||||
|
||||
async fn delete_record(&self, record: &BasicRecord) -> anyhow::Result<()> {
|
||||
let zone_id = match &record.kind {
|
||||
RecordType::PTR => &self.reverse_zone_id,
|
||||
_ => &self.forward_zone_id,
|
||||
};
|
||||
async fn delete_record(&self, zone_id: &str, record: &BasicRecord) -> anyhow::Result<()> {
|
||||
let record_id = record.id.as_ref().context("record id missing")?;
|
||||
info!(
|
||||
"Deleting record: [{}] {} => {}",
|
||||
@ -98,11 +90,11 @@ impl DnsServer for Cloudflare {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn update_record(&self, record: &BasicRecord) -> anyhow::Result<BasicRecord> {
|
||||
let zone_id = match &record.kind {
|
||||
RecordType::PTR => &self.reverse_zone_id,
|
||||
_ => &self.forward_zone_id,
|
||||
};
|
||||
async fn update_record(
|
||||
&self,
|
||||
zone_id: &str,
|
||||
record: &BasicRecord,
|
||||
) -> anyhow::Result<BasicRecord> {
|
||||
info!(
|
||||
"Updating record: [{}] {} => {}",
|
||||
record.kind, record.name, record.value
|
@ -6,19 +6,20 @@ use std::str::FromStr;
|
||||
|
||||
#[cfg(feature = "cloudflare")]
|
||||
mod cloudflare;
|
||||
use crate::provisioner::NetworkProvisioner;
|
||||
#[cfg(feature = "cloudflare")]
|
||||
pub use cloudflare::*;
|
||||
|
||||
#[async_trait]
|
||||
pub trait DnsServer: Send + Sync {
|
||||
/// Add PTR record to the reverse zone
|
||||
async fn add_record(&self, record: &BasicRecord) -> Result<BasicRecord>;
|
||||
async fn add_record(&self, zone_id: &str, record: &BasicRecord) -> Result<BasicRecord>;
|
||||
|
||||
/// Delete PTR record from the reverse zone
|
||||
async fn delete_record(&self, record: &BasicRecord) -> Result<()>;
|
||||
async fn delete_record(&self, zone_id: &str, record: &BasicRecord) -> Result<()>;
|
||||
|
||||
/// Update a record
|
||||
async fn update_record(&self, record: &BasicRecord) -> Result<BasicRecord>;
|
||||
async fn update_record(&self, zone_id: &str, record: &BasicRecord) -> Result<BasicRecord>;
|
||||
}
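A short sketch of the updated call pattern (not part of the diff): the zone id is now chosen by the caller rather than inferred from the record type; `VmIpAssignment` is assumed to be the lnvps_db row type behind `list_vm_ip_assignments`.

// Sketch only: demonstrates the new explicit zone-id parameter on DnsServer.
async fn add_forward_record(
    dns: &dyn DnsServer,
    forward_zone_id: &str,
    ip: &lnvps_db::VmIpAssignment,
) -> anyhow::Result<()> {
    let rec = BasicRecord::forward(ip)?;
    // the returned record carries the provider-side id used for later updates/deletes
    let created = dns.add_record(forward_zone_id, &rec).await?;
    dns.delete_record(forward_zone_id, &created).await?;
    Ok(())
}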
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
@ -77,7 +78,7 @@ impl BasicRecord {
|
||||
Ok(Self {
|
||||
name: match addr {
|
||||
IpAddr::V4(i) => i.octets()[3].to_string(),
|
||||
IpAddr::V6(_) => bail!("IPv6 PTR not supported"),
|
||||
IpAddr::V6(i) => NetworkProvisioner::ipv6_to_ptr(&i)?,
|
||||
},
|
||||
value: fwd,
|
||||
id: ip.dns_reverse_ref.clone(),
|
||||
@ -98,7 +99,7 @@ impl BasicRecord {
|
||||
Ok(Self {
|
||||
name: match addr {
|
||||
IpAddr::V4(i) => i.octets()[3].to_string(),
|
||||
IpAddr::V6(_) => bail!("IPv6 PTR not supported"),
|
||||
IpAddr::V6(i) => NetworkProvisioner::ipv6_to_ptr(&i)?,
|
||||
},
|
||||
value: rev,
|
||||
id: ip.dns_reverse_ref.clone(),
|
232
lnvps_api/src/dvm/lnvps.rs
Normal file
@ -0,0 +1,232 @@
|
||||
use crate::dvm::{build_status_for_job, DVMHandler, DVMJobRequest};
|
||||
use crate::provisioner::LNVpsProvisioner;
|
||||
use crate::{GB, MB};
|
||||
use anyhow::Context;
|
||||
use lnvps_db::{
|
||||
DiskInterface, DiskType, LNVpsDb, OsDistribution, PaymentMethod, UserSshKey, VmCustomTemplate,
|
||||
};
|
||||
use nostr::prelude::DataVendingMachineStatus;
|
||||
use nostr::Tag;
|
||||
use nostr_sdk::Client;
|
||||
use ssh_key::PublicKey;
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
|
||||
pub struct LnvpsDvm {
|
||||
client: Client,
|
||||
provisioner: Arc<LNVpsProvisioner>,
|
||||
}
|
||||
|
||||
impl LnvpsDvm {
|
||||
pub fn new(provisioner: Arc<LNVpsProvisioner>, client: Client) -> LnvpsDvm {
|
||||
Self {
|
||||
provisioner,
|
||||
client,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl DVMHandler for LnvpsDvm {
|
||||
fn handle_request(
|
||||
&mut self,
|
||||
request: DVMJobRequest,
|
||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<()>> + Send>> {
|
||||
let provisioner = self.provisioner.clone();
|
||||
let client = self.client.clone();
|
||||
Box::pin(async move {
|
||||
let default_disk = "ssd".to_string();
|
||||
let default_interface = "pcie".to_string();
|
||||
let cpu = request.params.get("cpu").context("missing cpu parameter")?;
|
||||
let memory = request
|
||||
.params
|
||||
.get("memory")
|
||||
.context("missing memory parameter")?;
|
||||
let disk = request
|
||||
.params
|
||||
.get("disk")
|
||||
.context("missing disk parameter")?;
|
||||
let disk_type = request.params.get("disk_type").unwrap_or(&default_disk);
|
||||
let disk_interface = request
|
||||
.params
|
||||
.get("disk_interface")
|
||||
.unwrap_or(&default_interface);
|
||||
let ssh_key = request
|
||||
.params
|
||||
.get("ssh_key")
|
||||
.context("missing ssh_key parameter")?;
|
||||
let ssh_key_name = request.params.get("ssh_key_name");
|
||||
let os_image = request.params.get("os").context("missing os parameter")?;
|
||||
let os_version = request
|
||||
.params
|
||||
.get("os_version")
|
||||
.context("missing os_version parameter")?;
|
||||
let region = request.params.get("region");
|
||||
|
||||
let db = provisioner.get_db();
|
||||
let host_region = if let Some(r) = region {
|
||||
db.get_host_region_by_name(r).await?
|
||||
} else {
|
||||
db.list_host_region()
|
||||
.await?
|
||||
.into_iter()
|
||||
.next()
|
||||
.context("no host region")?
|
||||
};
|
||||
let pricing = db.list_custom_pricing(host_region.id).await?;
|
||||
|
||||
// we expect only 1 pricing per region
|
||||
let pricing = pricing
|
||||
.first()
|
||||
.context("no custom pricing found in region")?;
|
||||
|
||||
let template = VmCustomTemplate {
|
||||
id: 0,
|
||||
cpu: cpu.parse()?,
|
||||
memory: MB * memory.parse::<u64>()?,
|
||||
disk_size: GB * disk.parse::<u64>()?,
|
||||
disk_type: DiskType::from_str(disk_type)?,
|
||||
disk_interface: DiskInterface::from_str(disk_interface)?,
|
||||
pricing_id: pricing.id,
|
||||
};
|
||||
let uid = db.upsert_user(request.event.pubkey.as_bytes()).await?;
|
||||
|
||||
let pk: PublicKey = ssh_key.parse()?;
|
||||
let key_name = if let Some(n) = ssh_key_name {
|
||||
n.clone()
|
||||
} else {
|
||||
pk.comment().to_string()
|
||||
};
|
||||
let new_key = UserSshKey {
|
||||
name: key_name,
|
||||
user_id: uid,
|
||||
key_data: pk.to_openssh()?,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
// report as started if params are valid
|
||||
let processing =
|
||||
build_status_for_job(&request, DataVendingMachineStatus::Processing, None, None);
|
||||
client.send_event_builder(processing).await?;
|
||||
|
||||
let existing_keys = db.list_user_ssh_key(uid).await?;
|
||||
let ssh_key_id = if let Some(k) = existing_keys.iter().find(|k| {
|
||||
let ek: PublicKey = k.key_data.parse().unwrap();
|
||||
ek.eq(&pk)
|
||||
}) {
|
||||
k.id
|
||||
} else {
|
||||
db.insert_user_ssh_key(&new_key).await?
|
||||
};
|
||||
|
||||
let image = OsDistribution::from_str(os_image)?;
|
||||
let image = db
|
||||
.list_os_image()
|
||||
.await?
|
||||
.into_iter()
|
||||
.find(|i| i.distribution == image && i.version == *os_version)
|
||||
.context("no os image found")?;
|
||||
|
||||
let vm = provisioner
|
||||
.provision_custom(uid, template, image.id, ssh_key_id, None)
|
||||
.await?;
|
||||
let invoice = provisioner.renew(vm.id, PaymentMethod::Lightning).await?;
|
||||
|
||||
let mut payment = build_status_for_job(
|
||||
&request,
|
||||
DataVendingMachineStatus::PaymentRequired,
|
||||
None,
|
||||
None,
|
||||
);
|
||||
payment = payment.tag(Tag::parse([
|
||||
"amount",
|
||||
invoice.amount.to_string().as_str(),
|
||||
&invoice.external_data,
|
||||
])?);
|
||||
client.send_event_builder(payment).await?;
|
||||
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::dvm::parse_job_request;
|
||||
use crate::exchange::{ExchangeRateService, Ticker};
|
||||
use crate::mocks::{MockDb, MockExchangeRate, MockNode};
|
||||
use crate::settings::mock_settings;
|
||||
use lnvps_db::{VmCustomPricing, VmCustomPricingDisk};
|
||||
use nostr::{EventBuilder, Keys, Kind};
|
||||
|
||||
#[tokio::test]
|
||||
#[ignore]
|
||||
async fn test_dvm() -> anyhow::Result<()> {
|
||||
let db = Arc::new(MockDb::default());
|
||||
let node = Arc::new(MockNode::new());
|
||||
let exch = Arc::new(MockExchangeRate::new());
|
||||
exch.set_rate(Ticker::btc_rate("EUR")?, 69_420.0).await;
|
||||
|
||||
{
|
||||
let mut cp = db.custom_pricing.lock().await;
|
||||
cp.insert(
|
||||
1,
|
||||
VmCustomPricing {
|
||||
id: 1,
|
||||
name: "mock".to_string(),
|
||||
enabled: true,
|
||||
created: Default::default(),
|
||||
expires: None,
|
||||
region_id: 1,
|
||||
currency: "EUR".to_string(),
|
||||
cpu_cost: 1.5,
|
||||
memory_cost: 0.5,
|
||||
ip4_cost: 1.5,
|
||||
ip6_cost: 0.05,
|
||||
},
|
||||
);
|
||||
let mut cpd = db.custom_pricing_disk.lock().await;
|
||||
cpd.insert(
|
||||
1,
|
||||
VmCustomPricingDisk {
|
||||
id: 1,
|
||||
pricing_id: 1,
|
||||
kind: DiskType::SSD,
|
||||
interface: DiskInterface::PCIe,
|
||||
cost: 0.05,
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
let settings = mock_settings();
|
||||
let provisioner = Arc::new(LNVpsProvisioner::new(
|
||||
settings,
|
||||
db.clone(),
|
||||
node.clone(),
|
||||
exch.clone(),
|
||||
));
|
||||
let keys = Keys::generate();
|
||||
let empty_client = Client::new(keys.clone());
|
||||
empty_client.add_relay("wss://nos.lol").await?;
|
||||
empty_client.connect().await;
|
||||
|
||||
let mut dvm = LnvpsDvm::new(provisioner.clone(), empty_client.clone());
|
||||
|
||||
let ev = EventBuilder::new(Kind::from_u16(5999), "")
|
||||
.tags([
|
||||
Tag::parse(["param", "cpu", "1"])?,
|
||||
Tag::parse(["param", "memory", "1024"])?,
|
||||
Tag::parse(["param", "disk", "50"])?,
|
||||
Tag::parse(["param", "disk_type", "ssd"])?,
|
||||
Tag::parse(["param", "ssh_key", "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGUSrwzZfbjqY81RRC7eg3zRvg0D53HOhjbG6h0SY3f3"])?,
|
||||
])
|
||||
.sign(&keys)
|
||||
.await?;
|
||||
let req = parse_job_request(&ev)?;
|
||||
dvm.handle_request(req).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
257
lnvps_api/src/dvm/mod.rs
Normal file
@ -0,0 +1,257 @@
|
||||
mod lnvps;
|
||||
|
||||
use crate::dvm::lnvps::LnvpsDvm;
|
||||
use crate::provisioner::LNVpsProvisioner;
|
||||
use anyhow::Result;
|
||||
use futures::FutureExt;
|
||||
use log::{error, info, warn};
|
||||
use nostr::Filter;
|
||||
use nostr_sdk::prelude::DataVendingMachineStatus;
|
||||
use nostr_sdk::{
|
||||
Client, Event, EventBuilder, EventId, Kind, RelayPoolNotification, Tag, Timestamp, Url,
|
||||
};
|
||||
use std::collections::HashMap;
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use tokio::task::JoinHandle;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct DVMJobRequest {
|
||||
/// The source event
|
||||
pub event: Event,
|
||||
/// Input data for the job (zero or more inputs)
|
||||
pub inputs: Vec<DVMInput>,
|
||||
/// Expected output format. Different job request kind defines this more precisely.
|
||||
pub output_type: Option<String>,
|
||||
/// Optional parameters for the job as key (first argument)/value (second argument).
|
||||
/// Different job request kind defines this more precisely. (e.g. [ "param", "lang", "es" ])
|
||||
pub params: HashMap<String, String>,
|
||||
/// Customer MAY specify a maximum amount (in millisats) they are willing to pay
|
||||
pub bid: Option<u64>,
|
||||
/// List of relays where Service Providers SHOULD publish responses to
|
||||
pub relays: Vec<String>,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub enum DVMInput {
|
||||
Url {
|
||||
url: Url,
|
||||
relay: Option<String>,
|
||||
marker: Option<String>,
|
||||
},
|
||||
Event {
|
||||
event: EventId,
|
||||
relay: Option<String>,
|
||||
marker: Option<String>,
|
||||
},
|
||||
Job {
|
||||
event: EventId,
|
||||
relay: Option<String>,
|
||||
marker: Option<String>,
|
||||
},
|
||||
Text {
|
||||
data: String,
|
||||
relay: Option<String>,
|
||||
marker: Option<String>,
|
||||
},
|
||||
}
|
||||
|
||||
/// Basic DVM handler that accepts a job request
|
||||
pub trait DVMHandler: Send + Sync {
|
||||
fn handle_request(
|
||||
&mut self,
|
||||
request: DVMJobRequest,
|
||||
) -> Pin<Box<dyn Future<Output = Result<()>> + Send>>;
|
||||
}
|
||||
|
||||
pub(crate) fn build_status_for_job(
|
||||
req: &DVMJobRequest,
|
||||
status: DataVendingMachineStatus,
|
||||
extra: Option<&str>,
|
||||
content: Option<&str>,
|
||||
) -> EventBuilder {
|
||||
EventBuilder::new(Kind::JobFeedback, content.unwrap_or("")).tags([
|
||||
Tag::parse(["status", status.to_string().as_str(), extra.unwrap_or("")]).unwrap(),
|
||||
Tag::expiration(Timestamp::now() + Duration::from_secs(30)),
|
||||
Tag::event(req.event.id),
|
||||
Tag::public_key(req.event.pubkey),
|
||||
])
|
||||
}
|
||||
|
||||
/// Start listening for jobs with a specific handler
|
||||
fn listen_for_jobs(
|
||||
client: Client,
|
||||
kind: Kind,
|
||||
mut dvm: Box<dyn DVMHandler>,
|
||||
) -> Pin<Box<dyn Future<Output = Result<()>> + Send>> {
|
||||
Box::pin(async move {
|
||||
let sub = client
|
||||
.subscribe(Filter::new().kind(kind).since(Timestamp::now()), None)
|
||||
.await?;
|
||||
|
||||
info!("Listening for jobs: {}", kind);
|
||||
let mut rx = client.notifications();
|
||||
while let Ok(e) = rx.recv().await {
|
||||
match e {
|
||||
RelayPoolNotification::Event { event, .. } if event.kind == kind => {
|
||||
match parse_job_request(&event) {
|
||||
Ok(req) => {
|
||||
if let Err(e) = dvm.handle_request(req.clone()).await {
|
||||
error!("Error handling job request: {}", e);
|
||||
|
||||
let data = build_status_for_job(
|
||||
&req,
|
||||
DataVendingMachineStatus::Error,
|
||||
Some(e.to_string().as_str()),
|
||||
None,
|
||||
);
|
||||
client.send_event_builder(data).await?;
|
||||
}
|
||||
}
|
||||
Err(e) => warn!("Invalid job request: {:?}", e),
|
||||
}
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
|
||||
client.unsubscribe(&sub).await;
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
|
||||
fn parse_job_request(event: &Event) -> Result<DVMJobRequest> {
|
||||
let mut inputs = vec![];
|
||||
for i_tag in event
|
||||
.tags
|
||||
.iter()
|
||||
.filter(|t| t.kind().as_str() == "i")
|
||||
.map(|t| t.as_slice())
|
||||
{
|
||||
let input = match i_tag[2].as_str() {
|
||||
"url" => DVMInput::Url {
|
||||
url: if let Ok(u) = i_tag[1].parse() {
|
||||
u
|
||||
} else {
|
||||
warn!("Invalid url: {}", i_tag[1]);
|
||||
continue;
|
||||
},
|
||||
relay: if i_tag.len() > 3 {
|
||||
Some(i_tag[3].to_string())
|
||||
} else {
|
||||
None
|
||||
},
|
||||
marker: if i_tag.len() > 4 {
|
||||
Some(i_tag[4].to_string())
|
||||
} else {
|
||||
None
|
||||
},
|
||||
},
|
||||
"event" => DVMInput::Event {
|
||||
event: if let Ok(t) = EventId::parse(&i_tag[1]) {
|
||||
t
|
||||
} else {
|
||||
warn!("Invalid event id: {}", i_tag[1]);
|
||||
continue;
|
||||
},
|
||||
relay: if i_tag.len() > 3 {
|
||||
Some(i_tag[3].to_string())
|
||||
} else {
|
||||
None
|
||||
},
|
||||
marker: if i_tag.len() > 4 {
|
||||
Some(i_tag[4].to_string())
|
||||
} else {
|
||||
None
|
||||
},
|
||||
},
|
||||
"job" => DVMInput::Job {
|
||||
event: if let Ok(t) = EventId::parse(&i_tag[1]) {
|
||||
t
|
||||
} else {
|
||||
warn!("Invalid event id in job: {}", i_tag[1]);
|
||||
continue;
|
||||
},
|
||||
relay: if i_tag.len() > 3 {
|
||||
Some(i_tag[3].to_string())
|
||||
} else {
|
||||
None
|
||||
},
|
||||
marker: if i_tag.len() > 4 {
|
||||
Some(i_tag[4].to_string())
|
||||
} else {
|
||||
None
|
||||
},
|
||||
},
|
||||
"text" => DVMInput::Text {
|
||||
data: i_tag[1].to_string(),
|
||||
relay: if i_tag.len() > 3 {
|
||||
Some(i_tag[3].to_string())
|
||||
} else {
|
||||
None
|
||||
},
|
||||
marker: if i_tag.len() > 4 {
|
||||
Some(i_tag[4].to_string())
|
||||
} else {
|
||||
None
|
||||
},
|
||||
},
|
||||
t => {
|
||||
warn!("unknown tag: {}", t);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
inputs.push(input);
|
||||
}
|
||||
|
||||
let params: HashMap<String, String> = event
|
||||
.tags
|
||||
.iter()
|
||||
.filter(|t| t.kind().as_str() == "param")
|
||||
.filter_map(|p| {
|
||||
let p = p.as_slice();
|
||||
if p.len() == 3 {
|
||||
Some((p[1].clone(), p[2].clone()))
|
||||
} else {
|
||||
warn!("Invalid param: {}", p.join(", "));
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
Ok(DVMJobRequest {
|
||||
event: event.clone(),
|
||||
inputs,
|
||||
output_type: event
|
||||
.tags
|
||||
.iter()
|
||||
.find(|t| t.kind().as_str() == "output")
|
||||
.and_then(|t| t.content())
|
||||
.map(|s| s.to_string()),
|
||||
params,
|
||||
bid: event
|
||||
.tags
|
||||
.iter()
|
||||
.find(|t| t.kind().as_str() == "bid")
|
||||
.and_then(|t| t.content())
|
||||
.and_then(|t| t.parse::<u64>().ok()),
|
||||
relays: event
|
||||
.tags
|
||||
.iter()
|
||||
.filter(|t| t.kind().as_str() == "relay")
|
||||
.map(|c| &c.as_slice()[1..])
|
||||
.flatten()
|
||||
.map(|s| s.to_string())
|
||||
.collect(),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn start_dvms(client: Client, provisioner: Arc<LNVpsProvisioner>) -> JoinHandle<()> {
|
||||
tokio::spawn(async move {
|
||||
let dvm = LnvpsDvm::new(provisioner, client.clone());
|
||||
if let Err(e) = listen_for_jobs(client, Kind::from_u16(5999), Box::new(dvm)).await {
|
||||
error!("Error listening jobs: {}", e);
|
||||
}
|
||||
})
|
||||
}
|
@ -1,10 +1,10 @@
|
||||
use anyhow::{anyhow, ensure, Context, Error, Result};
|
||||
use anyhow::{anyhow, ensure, Result};
|
||||
use lnvps_db::async_trait;
|
||||
use log::info;
|
||||
use rocket::serde::Deserialize;
|
||||
use schemars::JsonSchema;
|
||||
use serde::Serialize;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::collections::HashMap;
|
||||
use std::fmt::{Display, Formatter};
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
@ -15,6 +15,11 @@ pub enum Currency {
|
||||
EUR,
|
||||
BTC,
|
||||
USD,
|
||||
GBP,
|
||||
CAD,
|
||||
CHF,
|
||||
AUD,
|
||||
JPY,
|
||||
}
|
||||
|
||||
impl Display for Currency {
|
||||
@ -23,6 +28,11 @@ impl Display for Currency {
|
||||
Currency::EUR => write!(f, "EUR"),
|
||||
Currency::BTC => write!(f, "BTC"),
|
||||
Currency::USD => write!(f, "USD"),
|
||||
Currency::GBP => write!(f, "GBP"),
|
||||
Currency::CAD => write!(f, "CAD"),
|
||||
Currency::CHF => write!(f, "CHF"),
|
||||
Currency::AUD => write!(f, "AUD"),
|
||||
Currency::JPY => write!(f, "JPY"),
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -35,6 +45,11 @@ impl FromStr for Currency {
|
||||
"eur" => Ok(Currency::EUR),
|
||||
"usd" => Ok(Currency::USD),
|
||||
"btc" => Ok(Currency::BTC),
|
||||
"gbp" => Ok(Currency::GBP),
|
||||
"cad" => Ok(Currency::CAD),
|
||||
"chf" => Ok(Currency::CHF),
|
||||
"aud" => Ok(Currency::AUD),
|
||||
"jpy" => Ok(Currency::JPY),
|
||||
_ => Err(()),
|
||||
}
|
||||
}
|
||||
@ -60,7 +75,44 @@ impl Display for Ticker {
|
||||
pub struct TickerRate(pub Ticker, pub f32);
|
||||
|
||||
#[derive(Clone, Copy, Debug, PartialEq)]
|
||||
pub struct CurrencyAmount(pub Currency, pub f32);
|
||||
pub struct CurrencyAmount(Currency, u64);
|
||||
|
||||
impl CurrencyAmount {
|
||||
const MILLI_SATS: f64 = 1.0e11;
|
||||
|
||||
pub fn millisats(amount: u64) -> Self {
|
||||
CurrencyAmount(Currency::BTC, amount)
|
||||
}
|
||||
|
||||
pub fn from_u64(currency: Currency, amount: u64) -> Self {
|
||||
CurrencyAmount(currency, amount)
|
||||
}
|
||||
|
||||
pub fn from_f32(currency: Currency, amount: f32) -> Self {
|
||||
CurrencyAmount(
|
||||
currency,
|
||||
match currency {
|
||||
Currency::BTC => (amount as f64 * Self::MILLI_SATS) as u64, // milli-sats
|
||||
_ => (amount * 100.0) as u64, // cents
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
pub fn value(&self) -> u64 {
|
||||
self.1
|
||||
}
|
||||
|
||||
pub fn value_f32(&self) -> f32 {
|
||||
match self.0 {
|
||||
Currency::BTC => (self.1 as f64 / Self::MILLI_SATS) as f32,
|
||||
_ => self.1 as f32 / 100.0,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn currency(&self) -> Currency {
|
||||
self.0
|
||||
}
|
||||
}
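A usage sketch of the new integer-backed representation (not part of the diff): BTC amounts are held as milli-satoshis, fiat amounts as minor units (cents), using only the constructors and accessors introduced above.

// Sketch only: shows the unit conventions behind CurrencyAmount's u64 storage.
fn currency_amount_sketch() {
    let eur = CurrencyAmount::from_f32(Currency::EUR, 12.5);
    assert_eq!(eur.value(), 1250); // 12.50 EUR -> 1250 cents
    assert_eq!(eur.currency(), Currency::EUR);

    let btc = CurrencyAmount::millisats(1_000); // 1000 msats == 1 sat
    assert_eq!(btc.value(), 1_000);
    assert!((btc.value_f32() - 1.0e-8).abs() < 1.0e-12); // 1 sat expressed in BTC
}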
|
||||
|
||||
impl TickerRate {
|
||||
pub fn can_convert(&self, currency: Currency) -> bool {
|
||||
@ -74,9 +126,15 @@ impl TickerRate {
|
||||
"Cant convert, currency doesnt match"
|
||||
);
|
||||
if source.0 == self.0 .0 {
|
||||
Ok(CurrencyAmount(self.0 .1, source.1 * self.1))
|
||||
Ok(CurrencyAmount::from_f32(
|
||||
self.0 .1,
|
||||
source.value_f32() * self.1,
|
||||
))
|
||||
} else {
|
||||
Ok(CurrencyAmount(self.0 .0, source.1 / self.1))
|
||||
Ok(CurrencyAmount::from_f32(
|
||||
self.0 .0,
|
||||
source.value_f32() / self.1,
|
||||
))
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -99,7 +157,7 @@ pub fn alt_prices(rates: &Vec<TickerRate>, source: CurrencyAmount) -> Vec<Curren
|
||||
let mut ret2 = vec![];
|
||||
for y in rates.iter() {
|
||||
for x in ret.iter() {
|
||||
if let Ok(r1) = y.convert(x.clone()) {
|
||||
if let Ok(r1) = y.convert(*x) {
|
||||
if r1.0 != source.0 {
|
||||
ret2.push(r1);
|
||||
}
|
||||
@ -131,6 +189,21 @@ impl ExchangeRateService for DefaultRateCache {
|
||||
if let Some(eur) = rates.eur {
|
||||
ret.push(TickerRate(Ticker(Currency::BTC, Currency::EUR), eur));
|
||||
}
|
||||
if let Some(gbp) = rates.gbp {
|
||||
ret.push(TickerRate(Ticker(Currency::BTC, Currency::GBP), gbp));
|
||||
}
|
||||
if let Some(cad) = rates.cad {
|
||||
ret.push(TickerRate(Ticker(Currency::BTC, Currency::CAD), cad));
|
||||
}
|
||||
if let Some(chf) = rates.chf {
|
||||
ret.push(TickerRate(Ticker(Currency::BTC, Currency::CHF), chf));
|
||||
}
|
||||
if let Some(aud) = rates.aud {
|
||||
ret.push(TickerRate(Ticker(Currency::BTC, Currency::AUD), aud));
|
||||
}
|
||||
if let Some(jpy) = rates.jpy {
|
||||
ret.push(TickerRate(Ticker(Currency::BTC, Currency::JPY), jpy));
|
||||
}
|
||||
|
||||
Ok(ret)
|
||||
}
|
||||
@ -158,6 +231,16 @@ struct MempoolRates {
|
||||
pub usd: Option<f32>,
|
||||
#[serde(rename = "EUR")]
|
||||
pub eur: Option<f32>,
|
||||
#[serde(rename = "CAD")]
|
||||
pub cad: Option<f32>,
|
||||
#[serde(rename = "GBP")]
|
||||
pub gbp: Option<f32>,
|
||||
#[serde(rename = "CHF")]
|
||||
pub chf: Option<f32>,
|
||||
#[serde(rename = "AUD")]
|
||||
pub aud: Option<f32>,
|
||||
#[serde(rename = "JPY")]
|
||||
pub jpy: Option<f32>,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@ -171,12 +254,14 @@ mod tests {
|
||||
let f = TickerRate(ticker, RATE);
|
||||
|
||||
assert_eq!(
|
||||
f.convert(CurrencyAmount(Currency::EUR, 5.0)).unwrap(),
|
||||
CurrencyAmount(Currency::BTC, 5.0 / RATE)
|
||||
f.convert(CurrencyAmount::from_f32(Currency::EUR, 5.0))
|
||||
.unwrap(),
|
||||
CurrencyAmount::from_f32(Currency::BTC, 5.0 / RATE)
|
||||
);
|
||||
assert_eq!(
|
||||
f.convert(CurrencyAmount(Currency::BTC, 0.001)).unwrap(),
|
||||
CurrencyAmount(Currency::EUR, RATE * 0.001)
|
||||
f.convert(CurrencyAmount::from_f32(Currency::BTC, 0.001))
|
||||
.unwrap(),
|
||||
CurrencyAmount::from_f32(Currency::EUR, RATE * 0.001)
|
||||
);
|
||||
assert!(!f.can_convert(Currency::USD));
|
||||
assert!(f.can_convert(Currency::EUR));
|
25
lnvps_api/src/fiat/mod.rs
Normal file
@ -0,0 +1,25 @@
|
||||
/// Fiat payment integrations
|
||||
use crate::exchange::CurrencyAmount;
|
||||
use anyhow::Result;
|
||||
use rocket::serde::{Deserialize, Serialize};
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
|
||||
#[cfg(feature = "revolut")]
|
||||
mod revolut;
|
||||
#[cfg(feature = "revolut")]
|
||||
pub use revolut::*;
|
||||
|
||||
pub trait FiatPaymentService: Send + Sync {
|
||||
fn create_order(
|
||||
&self,
|
||||
description: &str,
|
||||
amount: CurrencyAmount,
|
||||
) -> Pin<Box<dyn Future<Output = Result<FiatPaymentInfo>> + Send>>;
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct FiatPaymentInfo {
|
||||
pub external_id: String,
|
||||
pub raw_data: String,
|
||||
}
|
289
lnvps_api/src/fiat/revolut.rs
Normal file
@ -0,0 +1,289 @@
|
||||
use crate::exchange::{Currency, CurrencyAmount};
|
||||
use crate::fiat::{FiatPaymentInfo, FiatPaymentService};
|
||||
use crate::json_api::{JsonApi, TokenGen};
|
||||
use crate::settings::RevolutConfig;
|
||||
use anyhow::{bail, Result};
|
||||
use chrono::{DateTime, Utc};
|
||||
use nostr::Url;
|
||||
use reqwest::header::AUTHORIZATION;
|
||||
use reqwest::{Method, RequestBuilder};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct RevolutApi {
|
||||
api: JsonApi,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
struct RevolutTokenGen {
|
||||
pub token: String,
|
||||
pub api_version: String,
|
||||
}
|
||||
|
||||
impl TokenGen for RevolutTokenGen {
|
||||
fn generate_token(
|
||||
&self,
|
||||
_method: Method,
|
||||
_url: &Url,
|
||||
_body: Option<&str>,
|
||||
req: RequestBuilder,
|
||||
) -> Result<RequestBuilder> {
|
||||
Ok(req
|
||||
.header(AUTHORIZATION, format!("Bearer {}", &self.token))
|
||||
.header("Revolut-Api-Version", &self.api_version))
|
||||
}
|
||||
}
|
||||
|
||||
impl RevolutApi {
|
||||
pub fn new(config: RevolutConfig) -> Result<Self> {
|
||||
let gen = RevolutTokenGen {
|
||||
token: config.token,
|
||||
api_version: config.api_version,
|
||||
};
|
||||
const DEFAULT_URL: &str = "https://merchant.revolut.com";
|
||||
|
||||
Ok(Self {
|
||||
api: JsonApi::token_gen(&config.url.unwrap_or(DEFAULT_URL.to_string()), false, gen)?,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn list_webhooks(&self) -> Result<Vec<RevolutWebhook>> {
|
||||
self.api.get("/api/1.0/webhooks").await
|
||||
}
|
||||
|
||||
pub async fn delete_webhook(&self, webhook_id: &str) -> Result<()> {
|
||||
self.api
|
||||
.req_status(
|
||||
Method::DELETE,
|
||||
&format!("/api/1.0/webhooks/{}", webhook_id),
|
||||
(),
|
||||
)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn create_webhook(
|
||||
&self,
|
||||
url: &str,
|
||||
events: Vec<RevolutWebhookEvent>,
|
||||
) -> Result<RevolutWebhook> {
|
||||
self.api
|
||||
.post(
|
||||
"/api/1.0/webhooks",
|
||||
CreateWebhookRequest {
|
||||
url: url.to_string(),
|
||||
events,
|
||||
},
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn create_order(
|
||||
&self,
|
||||
amount: CurrencyAmount,
|
||||
description: Option<String>,
|
||||
) -> Result<RevolutOrder> {
|
||||
self.api
|
||||
.post(
|
||||
"/api/orders",
|
||||
CreateOrderRequest {
|
||||
currency: amount.currency().to_string(),
|
||||
amount: match amount.currency() {
|
||||
Currency::BTC => bail!("Bitcoin amount not allowed for fiat payments"),
|
||||
_ => amount.value(),
|
||||
},
|
||||
description,
|
||||
},
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn get_order(&self, order_id: &str) -> Result<RevolutOrder> {
|
||||
self.api.get(&format!("/api/orders/{}", order_id)).await
|
||||
}
|
||||
}
|
||||
|
||||
impl FiatPaymentService for RevolutApi {
|
||||
fn create_order(
|
||||
&self,
|
||||
description: &str,
|
||||
amount: CurrencyAmount,
|
||||
) -> Pin<Box<dyn Future<Output = Result<FiatPaymentInfo>> + Send>> {
|
||||
let s = self.clone();
|
||||
let desc = description.to_string();
|
||||
Box::pin(async move {
|
||||
let rsp = s.create_order(amount, Some(desc)).await?;
|
||||
Ok(FiatPaymentInfo {
|
||||
raw_data: serde_json::to_string(&rsp)?,
|
||||
external_id: rsp.id,
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Serialize)]
|
||||
pub struct CreateOrderRequest {
|
||||
pub amount: u64,
|
||||
pub currency: String,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub description: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Deserialize, Serialize)]
|
||||
pub struct RevolutOrder {
|
||||
pub id: String,
|
||||
pub token: String,
|
||||
pub state: RevolutOrderState,
|
||||
pub created_at: DateTime<Utc>,
|
||||
pub updated_at: DateTime<Utc>,
|
||||
pub description: Option<String>,
|
||||
pub amount: u64,
|
||||
pub currency: String,
|
||||
pub outstanding_amount: u64,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub checkout_url: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub payments: Option<Vec<RevolutOrderPayment>>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Deserialize, Serialize)]
|
||||
pub struct RevolutOrderPayment {
|
||||
pub id: String,
|
||||
pub state: RevolutPaymentState,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub decline_reason: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub bank_message: Option<String>,
|
||||
pub created_at: DateTime<Utc>,
|
||||
pub updated_at: DateTime<Utc>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub token: Option<String>,
|
||||
pub amount: u64,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub currency: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub settled_amount: Option<u64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub settled_currency: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub payment_method: Option<RevolutPaymentMethod>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub billing_address: Option<RevolutBillingAddress>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub risk_level: Option<RevolutRiskLevel>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Deserialize, Serialize)]
|
||||
pub struct RevolutPaymentMethod {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub id: Option<String>,
|
||||
#[serde(rename = "type")]
|
||||
pub kind: RevolutPaymentMethodType,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub card_brand: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub funding: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub card_country_code: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub card_bin: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub card_last_four: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub card_expiry: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub cardholder_name: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum RevolutPaymentMethodType {
|
||||
ApplePay,
|
||||
Card,
|
||||
GooglePay,
|
||||
RevolutPayCard,
|
||||
RevolutPayAccount,
|
||||
}
|
||||
|
||||
#[derive(Clone, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum RevolutRiskLevel {
|
||||
High,
|
||||
Low,
|
||||
}
|
||||
|
||||
#[derive(Clone, Deserialize, Serialize)]
|
||||
pub struct RevolutBillingAddress {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub street_line_1: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub street_line_2: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub region: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub city: Option<String>,
|
||||
|
||||
pub country_code: String,
|
||||
pub postcode: String,
|
||||
}
|
||||
|
||||
#[derive(Clone, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum RevolutOrderState {
|
||||
Pending,
|
||||
Processing,
|
||||
Authorised,
|
||||
Completed,
|
||||
Cancelled,
|
||||
Failed,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum RevolutPaymentState {
|
||||
Pending,
|
||||
AuthenticationChallenge,
|
||||
AuthenticationVerified,
|
||||
AuthorisationStarted,
|
||||
AuthorisationPassed,
|
||||
Authorised,
|
||||
CaptureStarted,
|
||||
Captured,
|
||||
RefundValidated,
|
||||
RefundStarted,
|
||||
CancellationStarted,
|
||||
Declining,
|
||||
Completing,
|
||||
Cancelling,
|
||||
Failing,
|
||||
Completed,
|
||||
Declined,
|
||||
SoftDeclined,
|
||||
Cancelled,
|
||||
Failed,
|
||||
}
|
||||
|
||||
#[derive(Clone, Deserialize, Serialize)]
|
||||
pub struct RevolutWebhook {
|
||||
pub id: String,
|
||||
pub url: String,
|
||||
pub events: Vec<RevolutWebhookEvent>,
|
||||
pub signing_secret: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
|
||||
pub enum RevolutWebhookEvent {
|
||||
OrderAuthorised,
|
||||
OrderCompleted,
|
||||
OrderCancelled,
|
||||
}
|
||||
|
||||
#[derive(Clone, Deserialize, Serialize)]
|
||||
pub struct CreateWebhookRequest {
|
||||
pub url: String,
|
||||
pub events: Vec<RevolutWebhookEvent>,
|
||||
}
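A hedged end-to-end sketch of how `RevolutApi` might be wired into a checkout flow, based only on the methods shown above. The webhook URL and order description are illustrative, and `RevolutConfig` is passed in rather than constructed because its full field list is not visible in this diff.

```rust
// Illustrative only; not part of the source.
use crate::exchange::{Currency, CurrencyAmount};
use crate::fiat::{RevolutApi, RevolutWebhookEvent};
use crate::settings::RevolutConfig;
use anyhow::Result;

async fn demo_checkout(cfg: RevolutConfig) -> Result<()> {
    let api = RevolutApi::new(cfg)?;

    // Register our webhook once (example URL, not from the source).
    let hooks = api.list_webhooks().await?;
    if !hooks
        .iter()
        .any(|w| w.url == "https://example.com/api/v1/webhook/revolut")
    {
        api.create_webhook(
            "https://example.com/api/v1/webhook/revolut",
            vec![RevolutWebhookEvent::OrderCompleted],
        )
        .await?;
    }

    // Fiat orders must not be denominated in BTC (create_order bails on it).
    let order = api
        .create_order(
            CurrencyAmount::from_f32(Currency::EUR, 5.0),
            Some("VM #1 renewal".to_string()),
        )
        .await?;
    println!("checkout at: {:?}", order.checkout_url);
    Ok(())
}
```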
|
657 lnvps_api/src/host/libvirt.rs (Normal file)
@ -0,0 +1,657 @@
|
||||
use crate::host::{
|
||||
FullVmInfo, TerminalStream, TimeSeries, TimeSeriesData, VmHostClient, VmHostDiskInfo,
|
||||
VmHostInfo,
|
||||
};
|
||||
use crate::settings::QemuConfig;
|
||||
use crate::status::{VmRunningState, VmState};
|
||||
use crate::KB;
|
||||
use anyhow::{bail, ensure, Context, Result};
|
||||
use chrono::Utc;
|
||||
use lnvps_db::{async_trait, LNVpsDb, Vm, VmOsImage};
|
||||
use log::info;
|
||||
use rand::random;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
use uuid::Uuid;
|
||||
use virt::connect::Connect;
|
||||
use virt::domain::Domain;
|
||||
use virt::sys::{
|
||||
virDomainCreate, VIR_CONNECT_LIST_STORAGE_POOLS_ACTIVE, VIR_DOMAIN_START_VALIDATE,
|
||||
};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct LibVirtHost {
|
||||
connection: Connect,
|
||||
qemu: QemuConfig,
|
||||
}
|
||||
|
||||
impl LibVirtHost {
|
||||
pub fn new(url: &str, qemu: QemuConfig) -> Result<Self> {
|
||||
Ok(Self {
|
||||
connection: Connect::open(Some(url))?,
|
||||
qemu,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn import_disk_image(&self, vm: &Vm, image: &VmOsImage) -> Result<()> {
|
||||
// https://libvirt.org/html/libvirt-libvirt-storage.html#virStorageVolUpload
|
||||
// https://libvirt.org/html/libvirt-libvirt-storage.html#virStorageVolResize
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn create_domain_xml(&self, cfg: &FullVmInfo) -> Result<DomainXML> {
|
||||
let storage = self
|
||||
.connection
|
||||
.list_all_storage_pools(VIR_CONNECT_LIST_STORAGE_POOLS_ACTIVE)?;
|
||||
|
||||
// check the storage disk exists, we don't need anything else from it for now
|
||||
let _storage_disk = if let Some(d) = storage
|
||||
.iter()
|
||||
.find(|s| s.get_name().map(|n| n == cfg.disk.name).unwrap_or(false))
|
||||
{
|
||||
d
|
||||
} else {
|
||||
bail!(
|
||||
"Disk \"{}\" not found on host! Available pools: {}",
|
||||
cfg.disk.name,
|
||||
storage
|
||||
.iter()
|
||||
.filter_map(|s| s.get_name().ok())
|
||||
.collect::<Vec<_>>()
|
||||
.join(",")
|
||||
);
|
||||
};
|
||||
|
||||
let resources = cfg.resources()?;
|
||||
let mut devices = vec![];
|
||||
// primary disk
|
||||
devices.push(DomainDevice::Disk(Disk {
|
||||
kind: DiskType::File,
|
||||
device: DiskDevice::Disk,
|
||||
source: DiskSource {
|
||||
file: Some(format!("{}:vm-{}-disk0", cfg.disk.name, cfg.vm.id)),
|
||||
..Default::default()
|
||||
},
|
||||
target: DiskTarget {
|
||||
dev: "vda".to_string(),
|
||||
bus: Some(DiskBus::VirtIO),
|
||||
},
|
||||
}));
|
||||
devices.push(DomainDevice::Interface(NetworkInterface {
|
||||
kind: NetworkKind::Bridge,
|
||||
mac: Some(NetworkMac {
|
||||
address: cfg.vm.mac_address.clone(),
|
||||
}),
|
||||
source: Some(NetworkSource {
|
||||
bridge: Some(self.qemu.bridge.clone()),
|
||||
}),
|
||||
target: None,
|
||||
vlan: cfg.host.vlan_id.map(|v| NetworkVlan {
|
||||
tags: vec![NetworkVlanTag { id: v as u32 }],
|
||||
}),
|
||||
}));
|
||||
Ok(DomainXML {
|
||||
kind: DomainType::KVM,
|
||||
id: Some(cfg.vm.id),
|
||||
name: Some(format!("VM{}", cfg.vm.id)),
|
||||
uuid: None,
|
||||
title: None,
|
||||
description: None,
|
||||
os: DomainOs {
|
||||
kind: DomainOsType {
|
||||
kind: DomainOsTypeKind::Hvm,
|
||||
arch: Some(DomainOsArch::from_str(&self.qemu.arch)?),
|
||||
machine: Some(DomainOsMachine::from_str(&self.qemu.machine)?),
|
||||
},
|
||||
firmware: Some(DomainOsFirmware::EFI),
|
||||
loader: Some(DomainOsLoader {
|
||||
read_only: None,
|
||||
kind: None,
|
||||
secure: Some(true),
|
||||
stateless: None,
|
||||
format: None,
|
||||
}),
|
||||
boot: DomainOsBoot {
|
||||
dev: DomainOsBootDev::HardDrive,
|
||||
},
|
||||
},
|
||||
vcpu: resources.cpu,
|
||||
memory: resources.memory,
|
||||
devices: DomainDevices { contents: devices },
|
||||
})
|
||||
}
|
||||
}
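The libvirt domain XML is produced purely through serde: quick_xml maps `@`-prefixed renames to XML attributes and `$text` / `$value` to text nodes or child elements. A small illustrative sketch of that mapping for a bridged interface, assumed to live inside this module since the XML structs defined later in the file are private:

```rust
// Serializing one device in isolation; the shape of the output follows the
// serde attributes on NetworkInterface / NetworkMac / NetworkSource below.
fn interface_xml_example() -> anyhow::Result<String> {
    let iface = NetworkInterface {
        kind: NetworkKind::Bridge,
        mac: Some(NetworkMac {
            address: "52:54:00:00:00:01".to_string(),
        }),
        source: Some(NetworkSource {
            bridge: Some("vmbr0".to_string()),
        }),
        target: None,
        vlan: None,
    };
    // Roughly: <interface type="bridge"><mac address="52:54:00:00:00:01"/><source bridge="vmbr0"/></interface>
    Ok(quick_xml::se::to_string(&iface)?)
}
```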
|
||||
|
||||
#[async_trait]
|
||||
impl VmHostClient for LibVirtHost {
|
||||
async fn get_info(&self) -> Result<VmHostInfo> {
|
||||
let info = self.connection.get_node_info()?;
|
||||
let storage = self
|
||||
.connection
|
||||
.list_all_storage_pools(VIR_CONNECT_LIST_STORAGE_POOLS_ACTIVE)?;
|
||||
Ok(VmHostInfo {
|
||||
cpu: info.cpus as u16,
|
||||
memory: info.memory * KB,
|
||||
disks: storage
|
||||
.iter()
|
||||
.filter_map(|p| {
|
||||
let info = p.get_info().ok()?;
|
||||
Some(VmHostDiskInfo {
|
||||
name: p.get_name().context("storage pool name is missing").ok()?,
|
||||
size: info.capacity,
|
||||
used: info.allocation,
|
||||
})
|
||||
})
|
||||
.collect(),
|
||||
})
|
||||
}
|
||||
|
||||
async fn download_os_image(&self, image: &VmOsImage) -> Result<()> {
|
||||
// TODO: download ISO images to host (somehow, ssh?)
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn generate_mac(&self, _vm: &Vm) -> Result<String> {
|
||||
Ok(format!(
|
||||
"52:54:00:{}:{}:{}",
|
||||
hex::encode([random::<u8>()]),
|
||||
hex::encode([random::<u8>()]),
|
||||
hex::encode([random::<u8>()])
|
||||
))
|
||||
}
|
||||
|
||||
async fn start_vm(&self, vm: &Vm) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn stop_vm(&self, vm: &Vm) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn reset_vm(&self, vm: &Vm) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn create_vm(&self, cfg: &FullVmInfo) -> Result<()> {
|
||||
let domain = self.create_domain_xml(cfg)?;
|
||||
let xml = quick_xml::se::to_string(&domain)?;
|
||||
let domain = Domain::create_xml(&self.connection, &xml, VIR_DOMAIN_START_VALIDATE)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn delete_vm(&self, vm: &Vm) -> Result<()> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn reinstall_vm(&self, cfg: &FullVmInfo) -> Result<()> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_vm_state(&self, vm: &Vm) -> Result<VmState> {
|
||||
Ok(VmState {
|
||||
timestamp: Utc::now().timestamp() as u64,
|
||||
state: VmRunningState::Stopped,
|
||||
cpu_usage: 0.0,
|
||||
mem_usage: 0.0,
|
||||
uptime: 0,
|
||||
net_in: 0,
|
||||
net_out: 0,
|
||||
disk_write: 0,
|
||||
disk_read: 0,
|
||||
})
|
||||
}
|
||||
|
||||
async fn configure_vm(&self, vm: &FullVmInfo) -> Result<()> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_time_series_data(
|
||||
&self,
|
||||
vm: &Vm,
|
||||
series: TimeSeries,
|
||||
) -> Result<Vec<TimeSeriesData>> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn connect_terminal(&self, vm: &Vm) -> Result<TerminalStream> {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone)]
|
||||
#[serde(rename = "domain")]
|
||||
struct DomainXML {
|
||||
#[serde(rename = "@type")]
|
||||
pub kind: DomainType,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
#[serde(rename = "@id")]
|
||||
pub id: Option<u64>,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub name: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub uuid: Option<Uuid>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub title: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub description: Option<String>,
|
||||
pub os: DomainOs,
|
||||
pub vcpu: u16,
|
||||
pub memory: u64,
|
||||
pub devices: DomainDevices,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone)]
|
||||
#[serde(rename = "devices")]
|
||||
struct DomainDevices {
|
||||
#[serde(rename = "$value")]
|
||||
pub contents: Vec<DomainDevice>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
enum DomainType {
|
||||
#[default]
|
||||
KVM,
|
||||
XEN,
|
||||
HVF,
|
||||
QEMU,
|
||||
LXC,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone)]
|
||||
#[serde(rename = "os")]
|
||||
struct DomainOs {
|
||||
#[serde(rename = "type")]
|
||||
pub kind: DomainOsType,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
#[serde(rename = "@firmware")]
|
||||
pub firmware: Option<DomainOsFirmware>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub loader: Option<DomainOsLoader>,
|
||||
pub boot: DomainOsBoot,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
enum DomainOsFirmware {
|
||||
#[default]
|
||||
EFI,
|
||||
BIOS,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone)]
|
||||
struct DomainOsType {
|
||||
#[serde(rename = "$text")]
|
||||
pub kind: DomainOsTypeKind,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
#[serde(rename = "@arch")]
|
||||
pub arch: Option<DomainOsArch>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
#[serde(rename = "@machine")]
|
||||
pub machine: Option<DomainOsMachine>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
enum DomainOsTypeKind {
|
||||
#[default]
|
||||
Hvm,
|
||||
Xen,
|
||||
Linux,
|
||||
XenPvh,
|
||||
Exe,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
enum DomainOsMachine {
|
||||
#[default]
|
||||
Q35,
|
||||
PC,
|
||||
}
|
||||
|
||||
impl FromStr for DomainOsMachine {
|
||||
type Err = anyhow::Error;
|
||||
|
||||
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
|
||||
match s.to_lowercase().as_str() {
|
||||
"q35" => Ok(DomainOsMachine::Q35),
|
||||
"pc" => Ok(DomainOsMachine::PC),
|
||||
v => bail!("Unknown machine type {}", v),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
enum DomainOsArch {
|
||||
#[default]
|
||||
X86_64,
|
||||
I686,
|
||||
}
|
||||
|
||||
impl FromStr for DomainOsArch {
|
||||
type Err = anyhow::Error;
|
||||
|
||||
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
|
||||
match s.to_lowercase().as_str() {
|
||||
"x86_64" => Ok(Self::X86_64),
|
||||
"i686" => Ok(Self::I686),
|
||||
v => bail!("unsupported arch {}", v),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone)]
|
||||
#[serde(rename = "loader")]
|
||||
struct DomainOsLoader {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
#[serde(rename = "@readonly")]
|
||||
pub read_only: Option<bool>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
#[serde(rename = "@type")]
|
||||
pub kind: Option<DomainOsLoaderType>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
#[serde(rename = "@secure")]
|
||||
pub secure: Option<bool>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
#[serde(rename = "@stateless")]
|
||||
pub stateless: Option<bool>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
#[serde(rename = "@format")]
|
||||
pub format: Option<DomainOsLoaderFormat>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
enum DomainOsLoaderType {
|
||||
#[default]
|
||||
ROM,
|
||||
PFlash,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
enum DomainOsLoaderFormat {
|
||||
Raw,
|
||||
#[default]
|
||||
QCow2,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone)]
|
||||
struct DomainOsBoot {
|
||||
#[serde(rename = "@dev")]
|
||||
pub dev: DomainOsBootDev,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
enum DomainOsBootDev {
|
||||
#[serde(rename = "fd")]
|
||||
Floppy,
|
||||
#[serde(rename = "hd")]
|
||||
#[default]
|
||||
HardDrive,
|
||||
CdRom,
|
||||
Network,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone)]
|
||||
#[serde(rename = "vcpu")]
|
||||
struct DomainVCPU {
|
||||
#[serde(rename = "$text")]
|
||||
pub count: u32,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone)]
|
||||
enum DomainDevice {
|
||||
#[serde(rename = "disk")]
|
||||
Disk(Disk),
|
||||
#[serde(rename = "interface")]
|
||||
Interface(NetworkInterface),
|
||||
#[serde(other)]
|
||||
Other,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone)]
|
||||
#[serde(rename = "interface")]
|
||||
struct NetworkInterface {
|
||||
#[serde(rename = "@type")]
|
||||
pub kind: NetworkKind,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub mac: Option<NetworkMac>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub source: Option<NetworkSource>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub target: Option<NetworkTarget>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub vlan: Option<NetworkVlan>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone)]
|
||||
#[serde(rename = "vlan")]
|
||||
struct NetworkVlan {
|
||||
#[serde(rename = "tag")]
|
||||
pub tags: Vec<NetworkVlanTag>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone)]
|
||||
#[serde(rename = "tag")]
|
||||
struct NetworkVlanTag {
|
||||
#[serde(rename = "@id")]
|
||||
pub id: u32,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
enum NetworkKind {
|
||||
Network,
|
||||
#[default]
|
||||
Bridge,
|
||||
User,
|
||||
Ethernet,
|
||||
Direct,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone)]
|
||||
#[serde(rename = "mac")]
|
||||
struct NetworkMac {
|
||||
#[serde(rename = "@address")]
|
||||
pub address: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone)]
|
||||
#[serde(rename = "source")]
|
||||
struct NetworkSource {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
#[serde(rename = "@bridge")]
|
||||
pub bridge: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone)]
|
||||
#[serde(rename = "target")]
|
||||
struct NetworkTarget {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
#[serde(rename = "@dev")]
|
||||
pub dev: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone)]
|
||||
#[serde(rename = "disk")]
|
||||
struct Disk {
|
||||
#[serde(rename = "@type")]
|
||||
pub kind: DiskType,
|
||||
#[serde(rename = "@device")]
|
||||
pub device: DiskDevice,
|
||||
pub source: DiskSource,
|
||||
pub target: DiskTarget,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
enum DiskType {
|
||||
#[default]
|
||||
File,
|
||||
Block,
|
||||
Dir,
|
||||
Network,
|
||||
Volume,
|
||||
Nvme,
|
||||
VHostUser,
|
||||
VHostVdpa,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
enum DiskDevice {
|
||||
Floppy,
|
||||
#[default]
|
||||
Disk,
|
||||
CdRom,
|
||||
Lun,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone)]
|
||||
#[serde(rename = "source")]
|
||||
struct DiskSource {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
#[serde(rename = "@file")]
|
||||
pub file: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
#[serde(rename = "@dir")]
|
||||
pub dir: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone)]
|
||||
#[serde(rename = "target")]
|
||||
struct DiskTarget {
|
||||
/// Device name (hint)
|
||||
#[serde(rename = "@dev")]
|
||||
pub dev: String,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
#[serde(rename = "@bus")]
|
||||
pub bus: Option<DiskBus>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Default, Clone)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
enum DiskBus {
|
||||
#[default]
|
||||
IDE,
|
||||
SCSI,
|
||||
VirtIO,
|
||||
XEN,
|
||||
USB,
|
||||
SATA,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::host::tests::mock_full_vm;
|
||||
|
||||
fn cfg() -> FullVmInfo {
|
||||
let mut cfg = mock_full_vm();
|
||||
// adjust mock data for libvirt test driver
|
||||
cfg.disk.name = "default-pool".to_string();
|
||||
|
||||
cfg
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_xml_os() -> Result<()> {
|
||||
let tag = "<os firmware=\"efi\"><type>hvm</type><boot dev=\"hd\"/></os>";
|
||||
|
||||
let test = DomainOs {
|
||||
kind: DomainOsType {
|
||||
kind: DomainOsTypeKind::Hvm,
|
||||
arch: None,
|
||||
machine: None,
|
||||
},
|
||||
firmware: Some(DomainOsFirmware::EFI),
|
||||
loader: None,
|
||||
boot: DomainOsBoot {
|
||||
dev: DomainOsBootDev::HardDrive,
|
||||
},
|
||||
};
|
||||
|
||||
let xml = quick_xml::se::to_string(&test)?;
|
||||
assert_eq!(tag, xml);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_xml_disk() -> Result<()> {
|
||||
let tag = "<disk type=\"file\" device=\"disk\"><source file=\"/var/lib/libvirt/images/disk.qcow2\"/><target dev=\"vda\" bus=\"virtio\"/></disk>";
|
||||
|
||||
let test = Disk {
|
||||
kind: DiskType::File,
|
||||
device: DiskDevice::Disk,
|
||||
source: DiskSource {
|
||||
file: Some("/var/lib/libvirt/images/disk.qcow2".to_string()),
|
||||
..Default::default()
|
||||
},
|
||||
target: DiskTarget {
|
||||
dev: "vda".to_string(),
|
||||
bus: Some(DiskBus::VirtIO),
|
||||
},
|
||||
};
|
||||
let xml = quick_xml::se::to_string(&test)?;
|
||||
assert_eq!(tag, xml);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_config_to_domain() -> Result<()> {
|
||||
let cfg = cfg();
|
||||
let template = cfg.template.clone().unwrap();
|
||||
|
||||
let q_cfg = QemuConfig {
|
||||
machine: "q35".to_string(),
|
||||
os_type: "l26".to_string(),
|
||||
bridge: "vmbr0".to_string(),
|
||||
cpu: "kvm64".to_string(),
|
||||
kvm: true,
|
||||
arch: "x86_64".to_string(),
|
||||
};
|
||||
let host = LibVirtHost::new("test:///default", q_cfg)?;
|
||||
let xml = host.create_domain_xml(&cfg)?;
|
||||
|
||||
let res = cfg.resources()?;
|
||||
assert_eq!(xml.vcpu, res.cpu);
|
||||
assert_eq!(xml.memory, res.memory);
|
||||
|
||||
let xml = quick_xml::se::to_string(&xml)?;
|
||||
println!("{}", xml);
|
||||
|
||||
let output = r#"<domain type="kvm" id="1"><name>VM1</name><os firmware="efi"><type arch="x86_64" machine="q35">hvm</type><loader secure="true"/><boot dev="hd"/></os><vcpu>2</vcpu><memory>2147483648</memory><devices><disk type="file" device="disk"><source file="default-pool:vm-1-disk0"/><target dev="vda" bus="virtio"/></disk><interface type="bridge"><mac address="ff:ff:ff:ff:ff:fe"/><source bridge="vmbr0"/><vlan><tag id="100"/></vlan></interface></devices></domain>"#;
|
||||
assert_eq!(xml, output);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[ignore]
|
||||
#[tokio::test]
|
||||
async fn test_vm_lifecycle() -> Result<()> {
|
||||
let cfg = cfg();
|
||||
let template = cfg.template.clone().unwrap();
|
||||
|
||||
let q_cfg = QemuConfig {
|
||||
machine: "q35".to_string(),
|
||||
os_type: "l26".to_string(),
|
||||
bridge: "vmbr0".to_string(),
|
||||
cpu: "kvm64".to_string(),
|
||||
kvm: true,
|
||||
arch: "x86_64".to_string(),
|
||||
};
|
||||
let host = LibVirtHost::new("test:///default", q_cfg)?;
|
||||
println!("{:?}", host.get_info().await?);
|
||||
host.create_vm(&cfg).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
373 lnvps_api/src/host/mod.rs (Normal file)
@ -0,0 +1,373 @@
|
||||
use crate::settings::ProvisionerConfig;
|
||||
use crate::status::VmState;
|
||||
use anyhow::{bail, Result};
|
||||
use futures::future::join_all;
|
||||
use lnvps_db::{
|
||||
async_trait, IpRange, LNVpsDb, UserSshKey, Vm, VmCustomTemplate, VmHost, VmHostDisk,
|
||||
VmHostKind, VmIpAssignment, VmOsImage, VmTemplate,
|
||||
};
|
||||
use schemars::JsonSchema;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashSet;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::mpsc::{Receiver, Sender};
|
||||
|
||||
#[cfg(feature = "libvirt")]
|
||||
mod libvirt;
|
||||
#[cfg(feature = "proxmox")]
|
||||
mod proxmox;
|
||||
|
||||
pub struct TerminalStream {
|
||||
pub rx: Receiver<Vec<u8>>,
|
||||
pub tx: Sender<Vec<u8>>,
|
||||
}
|
||||
|
||||
/// Generic interface for creating and managing VMs
|
||||
#[async_trait]
|
||||
pub trait VmHostClient: Send + Sync {
|
||||
async fn get_info(&self) -> Result<VmHostInfo>;
|
||||
|
||||
/// Download OS image to the host
|
||||
async fn download_os_image(&self, image: &VmOsImage) -> Result<()>;
|
||||
|
||||
/// Create a random MAC address for the NIC
|
||||
async fn generate_mac(&self, vm: &Vm) -> Result<String>;
|
||||
|
||||
/// Start a VM
|
||||
async fn start_vm(&self, vm: &Vm) -> Result<()>;
|
||||
|
||||
/// Stop a VM
|
||||
async fn stop_vm(&self, vm: &Vm) -> Result<()>;
|
||||
|
||||
/// Reset VM (Hard)
|
||||
async fn reset_vm(&self, vm: &Vm) -> Result<()>;
|
||||
|
||||
/// Spawn a VM
|
||||
async fn create_vm(&self, cfg: &FullVmInfo) -> Result<()>;
|
||||
|
||||
/// Delete a VM
|
||||
async fn delete_vm(&self, vm: &Vm) -> Result<()>;
|
||||
|
||||
/// Re-install a vm OS
|
||||
async fn reinstall_vm(&self, cfg: &FullVmInfo) -> Result<()>;
|
||||
|
||||
/// Get the running status of a VM
|
||||
async fn get_vm_state(&self, vm: &Vm) -> Result<VmState>;
|
||||
|
||||
/// Apply vm configuration (patch)
|
||||
async fn configure_vm(&self, cfg: &FullVmInfo) -> Result<()>;
|
||||
|
||||
/// Get resource usage data
|
||||
async fn get_time_series_data(
|
||||
&self,
|
||||
vm: &Vm,
|
||||
series: TimeSeries,
|
||||
) -> Result<Vec<TimeSeriesData>>;
|
||||
|
||||
/// Connect to terminal serial port
|
||||
async fn connect_terminal(&self, vm: &Vm) -> Result<TerminalStream>;
|
||||
}
|
||||
|
||||
pub fn get_host_client(host: &VmHost, cfg: &ProvisionerConfig) -> Result<Arc<dyn VmHostClient>> {
|
||||
#[cfg(test)]
|
||||
return Ok(Arc::new(crate::mocks::MockVmHost::new()));
|
||||
|
||||
Ok(match host.kind.clone() {
|
||||
#[cfg(feature = "proxmox")]
|
||||
VmHostKind::Proxmox if cfg.proxmox.is_some() => {
|
||||
let cfg = cfg.proxmox.clone().unwrap();
|
||||
Arc::new(proxmox::ProxmoxClient::new(
|
||||
host.ip.parse()?,
|
||||
&host.name,
|
||||
&host.api_token,
|
||||
cfg.mac_prefix,
|
||||
cfg.qemu,
|
||||
cfg.ssh,
|
||||
))
|
||||
}
|
||||
#[cfg(feature = "libvirt")]
|
||||
VmHostKind::LibVirt if cfg.libvirt.is_some() => {
|
||||
let cfg = cfg.libvirt.clone().unwrap();
|
||||
Arc::new(libvirt::LibVirtHost::new(&host.ip, cfg.qemu)?)
|
||||
}
|
||||
_ => bail!("Unknown host config: {}", host.kind),
|
||||
})
|
||||
}
|
||||
|
||||
/// All VM info necessary to provision a VM and its associated resources
|
||||
pub struct FullVmInfo {
|
||||
/// Instance to create
|
||||
pub vm: Vm,
|
||||
/// Host where the VM will be spawned
|
||||
pub host: VmHost,
|
||||
/// Disk where this VM will be saved on the host
|
||||
pub disk: VmHostDisk,
|
||||
/// VM template resources
|
||||
pub template: Option<VmTemplate>,
|
||||
/// VM custom template resources
|
||||
pub custom_template: Option<VmCustomTemplate>,
|
||||
/// The OS image used to create the VM
|
||||
pub image: VmOsImage,
|
||||
/// List of IP resources assigned to this VM
|
||||
pub ips: Vec<VmIpAssignment>,
|
||||
/// Ranges associated with [ips]
|
||||
pub ranges: Vec<IpRange>,
|
||||
/// SSH key to access the VM
|
||||
pub ssh_key: UserSshKey,
|
||||
}
|
||||
|
||||
impl FullVmInfo {
|
||||
pub async fn load(vm_id: u64, db: Arc<dyn LNVpsDb>) -> Result<Self> {
|
||||
let vm = db.get_vm(vm_id).await?;
|
||||
let host = db.get_host(vm.host_id).await?;
|
||||
let image = db.get_os_image(vm.image_id).await?;
|
||||
let disk = db.get_host_disk(vm.disk_id).await?;
|
||||
let ssh_key = db.get_user_ssh_key(vm.ssh_key_id).await?;
|
||||
let ips = db.list_vm_ip_assignments(vm_id).await?;
|
||||
|
||||
let ip_range_ids: HashSet<u64> = ips.iter().map(|i| i.ip_range_id).collect();
|
||||
let ip_ranges: Vec<_> = ip_range_ids.iter().map(|i| db.get_ip_range(*i)).collect();
|
||||
let ranges: Vec<IpRange> = join_all(ip_ranges)
|
||||
.await
|
||||
.into_iter()
|
||||
.filter_map(Result::ok)
|
||||
.collect();
|
||||
|
||||
let template = if let Some(t) = vm.template_id {
|
||||
Some(db.get_vm_template(t).await?)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let custom_template = if let Some(t) = vm.custom_template_id {
|
||||
Some(db.get_custom_vm_template(t).await?)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
// create VM
|
||||
Ok(FullVmInfo {
|
||||
vm,
|
||||
host,
|
||||
template,
|
||||
custom_template,
|
||||
image,
|
||||
ips,
|
||||
disk,
|
||||
ranges,
|
||||
ssh_key,
|
||||
})
|
||||
}
|
||||
|
||||
/// Resolve this VM's resource allocation (CPU, memory, disk)
|
||||
pub fn resources(&self) -> Result<VmResources> {
|
||||
if let Some(t) = &self.template {
|
||||
Ok(VmResources {
|
||||
cpu: t.cpu,
|
||||
memory: t.memory,
|
||||
disk_size: t.disk_size,
|
||||
})
|
||||
} else if let Some(t) = &self.custom_template {
|
||||
Ok(VmResources {
|
||||
cpu: t.cpu,
|
||||
memory: t.memory,
|
||||
disk_size: t.disk_size,
|
||||
})
|
||||
} else {
|
||||
bail!("Invalid VM config, no template");
|
||||
}
|
||||
}
|
||||
}
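A hedged sketch (not in the diff) of how these pieces compose in a provisioning path: load the full VM context from the database, resolve a host client from the provisioner config, then create and boot the VM. It assumes it lives in this module so the imports above are in scope; `spawn_vm` itself is hypothetical.

```rust
async fn spawn_vm(
    db: Arc<dyn LNVpsDb>,
    settings: &ProvisionerConfig,
    vm_id: u64,
) -> Result<()> {
    let info = FullVmInfo::load(vm_id, db).await?;
    let client = get_host_client(&info.host, settings)?;

    // Make sure the OS image exists on the host before creating the VM.
    client.download_os_image(&info.image).await?;
    client.create_vm(&info).await?;
    client.start_vm(&info.vm).await?;

    let state = client.get_vm_state(&info.vm).await?;
    log::info!("vm {} cpu usage: {}", info.vm.id, state.cpu_usage);
    Ok(())
}
```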
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct VmResources {
|
||||
pub cpu: u16,
|
||||
pub memory: u64,
|
||||
pub disk_size: u64,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||
pub struct TimeSeriesData {
|
||||
pub timestamp: u64,
|
||||
pub cpu: f32,
|
||||
pub memory: f32,
|
||||
pub memory_size: u64,
|
||||
pub net_in: f32,
|
||||
pub net_out: f32,
|
||||
pub disk_write: f32,
|
||||
pub disk_read: f32,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum TimeSeries {
|
||||
Hourly,
|
||||
Daily,
|
||||
Weekly,
|
||||
Monthly,
|
||||
Yearly,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct VmHostInfo {
|
||||
pub cpu: u16,
|
||||
pub memory: u64,
|
||||
pub disks: Vec<VmHostDiskInfo>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct VmHostDiskInfo {
|
||||
pub name: String,
|
||||
pub size: u64,
|
||||
pub used: u64,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::host::FullVmInfo;
|
||||
use crate::{GB, TB};
|
||||
use chrono::Utc;
|
||||
use lnvps_db::{
|
||||
DiskInterface, DiskType, IpRange, IpRangeAllocationMode, OsDistribution, UserSshKey, Vm,
|
||||
VmHost, VmHostDisk, VmIpAssignment, VmOsImage, VmTemplate,
|
||||
};
|
||||
|
||||
pub fn mock_full_vm() -> FullVmInfo {
|
||||
let template = VmTemplate {
|
||||
id: 1,
|
||||
name: "example".to_string(),
|
||||
enabled: true,
|
||||
created: Default::default(),
|
||||
expires: None,
|
||||
cpu: 2,
|
||||
memory: 2 * GB,
|
||||
disk_size: 100 * GB,
|
||||
disk_type: DiskType::SSD,
|
||||
disk_interface: DiskInterface::PCIe,
|
||||
cost_plan_id: 1,
|
||||
region_id: 1,
|
||||
};
|
||||
FullVmInfo {
|
||||
vm: Vm {
|
||||
id: 1,
|
||||
host_id: 1,
|
||||
user_id: 1,
|
||||
image_id: 1,
|
||||
template_id: Some(template.id),
|
||||
custom_template_id: None,
|
||||
ssh_key_id: 1,
|
||||
created: Default::default(),
|
||||
expires: Default::default(),
|
||||
disk_id: 1,
|
||||
mac_address: "ff:ff:ff:ff:ff:fe".to_string(),
|
||||
deleted: false,
|
||||
ref_code: None,
|
||||
},
|
||||
host: VmHost {
|
||||
id: 1,
|
||||
kind: Default::default(),
|
||||
region_id: 1,
|
||||
name: "mock".to_string(),
|
||||
ip: "https://localhost:8006".to_string(),
|
||||
cpu: 20,
|
||||
memory: 128 * GB,
|
||||
enabled: true,
|
||||
api_token: "mock".to_string(),
|
||||
load_cpu: 1.0,
|
||||
load_memory: 1.0,
|
||||
load_disk: 1.0,
|
||||
vlan_id: Some(100),
|
||||
},
|
||||
disk: VmHostDisk {
|
||||
id: 1,
|
||||
host_id: 1,
|
||||
name: "ssd".to_string(),
|
||||
size: TB * 20,
|
||||
kind: DiskType::SSD,
|
||||
interface: DiskInterface::PCIe,
|
||||
enabled: true,
|
||||
},
|
||||
template: Some(template.clone()),
|
||||
custom_template: None,
|
||||
image: VmOsImage {
|
||||
id: 1,
|
||||
distribution: OsDistribution::Ubuntu,
|
||||
flavour: "Server".to_string(),
|
||||
version: "24.04.03".to_string(),
|
||||
enabled: true,
|
||||
release_date: Utc::now(),
|
||||
url: "http://localhost.com/ubuntu_server_24.04.img".to_string(),
|
||||
default_username: None,
|
||||
},
|
||||
ips: vec![
|
||||
VmIpAssignment {
|
||||
id: 1,
|
||||
vm_id: 1,
|
||||
ip_range_id: 1,
|
||||
ip: "192.168.1.2".to_string(),
|
||||
deleted: false,
|
||||
arp_ref: None,
|
||||
dns_forward: None,
|
||||
dns_forward_ref: None,
|
||||
dns_reverse: None,
|
||||
dns_reverse_ref: None,
|
||||
},
|
||||
VmIpAssignment {
|
||||
id: 2,
|
||||
vm_id: 1,
|
||||
ip_range_id: 2,
|
||||
ip: "192.168.2.2".to_string(),
|
||||
deleted: false,
|
||||
arp_ref: None,
|
||||
dns_forward: None,
|
||||
dns_forward_ref: None,
|
||||
dns_reverse: None,
|
||||
dns_reverse_ref: None,
|
||||
},
|
||||
VmIpAssignment {
|
||||
id: 3,
|
||||
vm_id: 1,
|
||||
ip_range_id: 3,
|
||||
ip: "fd00::ff:ff:ff:ff:ff".to_string(),
|
||||
deleted: false,
|
||||
arp_ref: None,
|
||||
dns_forward: None,
|
||||
dns_forward_ref: None,
|
||||
dns_reverse: None,
|
||||
dns_reverse_ref: None,
|
||||
},
|
||||
],
|
||||
ranges: vec![
|
||||
IpRange {
|
||||
id: 1,
|
||||
cidr: "192.168.1.0/24".to_string(),
|
||||
gateway: "192.168.1.1/16".to_string(),
|
||||
enabled: true,
|
||||
region_id: 1,
|
||||
..Default::default()
|
||||
},
|
||||
IpRange {
|
||||
id: 2,
|
||||
cidr: "192.168.2.0/24".to_string(),
|
||||
gateway: "10.10.10.10".to_string(),
|
||||
enabled: true,
|
||||
region_id: 2,
|
||||
..Default::default()
|
||||
},
|
||||
IpRange {
|
||||
id: 3,
|
||||
cidr: "fd00::/64".to_string(),
|
||||
gateway: "fd00::1".to_string(),
|
||||
enabled: true,
|
||||
region_id: 1,
|
||||
allocation_mode: IpRangeAllocationMode::SlaacEui64,
|
||||
..Default::default()
|
||||
},
|
||||
],
|
||||
ssh_key: UserSshKey {
|
||||
id: 1,
|
||||
name: "test".to_string(),
|
||||
user_id: 1,
|
||||
created: Default::default(),
|
||||
key_data: "ssh-ed25519 AAA=".to_string(),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
lnvps_api/src/host/proxmox.rs
@ -1,12 +1,16 @@
|
||||
use crate::host::{FullVmInfo, TimeSeries, TimeSeriesData, VmHostClient};
|
||||
use crate::host::{
|
||||
FullVmInfo, TerminalStream, TimeSeries, TimeSeriesData, VmHostClient, VmHostDiskInfo,
|
||||
VmHostInfo,
|
||||
};
|
||||
use crate::json_api::JsonApi;
|
||||
use crate::settings::{QemuConfig, SshConfig};
|
||||
use crate::ssh_client::SshClient;
|
||||
use crate::status::{VmRunningState, VmState};
|
||||
use anyhow::{anyhow, bail, ensure, Result};
|
||||
use anyhow::{anyhow, bail, ensure, Context, Result};
|
||||
use chrono::Utc;
|
||||
use futures::StreamExt;
|
||||
use ipnetwork::IpNetwork;
|
||||
use lnvps_db::{async_trait, DiskType, Vm, VmOsImage};
|
||||
use lnvps_db::{async_trait, DiskType, IpRangeAllocationMode, Vm, VmOsImage};
|
||||
use log::{info, warn};
|
||||
use rand::random;
|
||||
use reqwest::header::{HeaderMap, AUTHORIZATION};
|
||||
@ -14,9 +18,11 @@ use reqwest::{ClientBuilder, Method, Url};
|
||||
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
||||
use std::collections::HashMap;
|
||||
use std::fmt::{Debug, Display, Formatter};
|
||||
use std::io::Write;
|
||||
use std::net::IpAddr;
|
||||
use std::str::FromStr;
|
||||
use std::time::Duration;
|
||||
use tokio::sync::mpsc::channel;
|
||||
use tokio::time::sleep;
|
||||
|
||||
pub struct ProxmoxClient {
|
||||
@ -36,19 +42,8 @@ impl ProxmoxClient {
|
||||
config: QemuConfig,
|
||||
ssh: Option<SshConfig>,
|
||||
) -> Self {
|
||||
let mut headers = HeaderMap::new();
|
||||
headers.insert(
|
||||
AUTHORIZATION,
|
||||
format!("PVEAPIToken={}", token).parse().unwrap(),
|
||||
);
|
||||
let client = ClientBuilder::new()
|
||||
.danger_accept_invalid_certs(true)
|
||||
.default_headers(headers)
|
||||
.build()
|
||||
.expect("Failed to build client");
|
||||
|
||||
Self {
|
||||
api: JsonApi { base, client },
|
||||
api: JsonApi::token(base.as_str(), &format!("PVEAPIToken={}", token), true).unwrap(),
|
||||
config,
|
||||
ssh,
|
||||
node: node.to_string(),
|
||||
@ -94,6 +89,14 @@ impl ProxmoxClient {
|
||||
Ok(rsp.data)
|
||||
}
|
||||
|
||||
pub async fn list_disks(&self, node: &str) -> Result<Vec<NodeDisk>> {
|
||||
let rsp: ResponseBase<Vec<NodeDisk>> = self
|
||||
.api
|
||||
.get(&format!("/api2/json/nodes/{node}/disks/list"))
|
||||
.await?;
|
||||
Ok(rsp.data)
|
||||
}
|
||||
|
||||
/// List files in a storage pool
|
||||
pub async fn list_storage_files(
|
||||
&self,
|
||||
@ -248,7 +251,7 @@ impl ProxmoxClient {
|
||||
if let Some(ssh_config) = &self.ssh {
|
||||
let mut ses = SshClient::new()?;
|
||||
ses.connect(
|
||||
(self.api.base.host().unwrap().to_string(), 22),
|
||||
(self.api.base().host().unwrap().to_string(), 22),
|
||||
&ssh_config.user,
|
||||
&ssh_config.key,
|
||||
)
|
||||
@ -365,6 +368,30 @@ impl ProxmoxClient {
|
||||
node: node.to_string(),
|
||||
})
|
||||
}
|
||||
|
||||
/// Delete disks from VM
|
||||
pub async fn unlink_disk(
|
||||
&self,
|
||||
node: &str,
|
||||
vm: ProxmoxVmId,
|
||||
disks: Vec<String>,
|
||||
force: bool,
|
||||
) -> Result<()> {
|
||||
self.api
|
||||
.req_status(
|
||||
Method::PUT,
|
||||
&format!(
|
||||
"/api2/json/nodes/{}/qemu/{}/unlink?idlist={}&force={}",
|
||||
node,
|
||||
vm,
|
||||
disks.join(","),
|
||||
if force { "1" } else { "0" }
|
||||
),
|
||||
(),
|
||||
)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl ProxmoxClient {
|
||||
@ -376,15 +403,28 @@ impl ProxmoxClient {
|
||||
if let Ok(net) = ip.ip.parse::<IpAddr>() {
|
||||
Some(match net {
|
||||
IpAddr::V4(addr) => {
|
||||
let range = value.ranges.iter().find(|r| r.id == ip.ip_range_id)?;
|
||||
let range: IpNetwork = range.gateway.parse().ok()?;
|
||||
let ip_range = value.ranges.iter().find(|r| r.id == ip.ip_range_id)?;
|
||||
let range: IpNetwork = ip_range.cidr.parse().ok()?;
|
||||
let range_gw: IpNetwork = ip_range.gateway.parse().ok()?;
|
||||
// take the largest (smallest prefix number) of the network prefixes
|
||||
let max_net = range.prefix().min(range_gw.prefix());
|
||||
format!(
|
||||
"ip={},gw={}",
|
||||
IpNetwork::new(addr.into(), range.prefix()).ok()?,
|
||||
range.ip()
|
||||
IpNetwork::new(addr.into(), max_net).ok()?,
|
||||
range_gw.ip()
|
||||
)
|
||||
}
|
||||
IpAddr::V6(addr) => format!("ip6={}", addr),
|
||||
IpAddr::V6(addr) => {
|
||||
let ip_range = value.ranges.iter().find(|r| r.id == ip.ip_range_id)?;
|
||||
if matches!(ip_range.allocation_mode, IpRangeAllocationMode::SlaacEui64)
|
||||
{
|
||||
// just ignore what's in the db and use whatever the host wants
|
||||
// what's in the db is purely informational
|
||||
"ip6=auto".to_string()
|
||||
} else {
|
||||
format!("ip6={}", addr)
|
||||
}
|
||||
}
|
||||
})
|
||||
} else {
|
||||
None
|
||||
@ -392,14 +432,11 @@ impl ProxmoxClient {
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
// TODO: make this configurable
|
||||
ip_config.push("ip6=auto".to_string());
|
||||
|
||||
let mut net = vec![
|
||||
format!("virtio={}", value.vm.mac_address),
|
||||
format!("bridge={}", self.config.bridge),
|
||||
];
|
||||
if let Some(t) = self.config.vlan {
|
||||
if let Some(t) = value.host.vlan_id {
|
||||
net.push(format!("tag={}", t));
|
||||
}
|
||||
|
||||
@ -415,7 +452,7 @@ impl ProxmoxClient {
|
||||
bios: Some(VmBios::OVMF),
|
||||
boot: Some("order=scsi0".to_string()),
|
||||
cores: Some(vm_resources.cpu as i32),
|
||||
memory: Some((vm_resources.memory / 1024 / 1024).to_string()),
|
||||
memory: Some((vm_resources.memory / crate::MB).to_string()),
|
||||
scsi_hw: Some("virtio-scsi-pci".to_string()),
|
||||
serial_0: Some("socket".to_string()),
|
||||
scsi_1: Some(format!("{}:cloudinit", &value.disk.name)),
|
||||
@ -424,9 +461,74 @@ impl ProxmoxClient {
|
||||
..Default::default()
|
||||
})
|
||||
}
|
||||
|
||||
/// Import main disk image from the template
|
||||
async fn import_template_disk(&self, req: &FullVmInfo) -> Result<()> {
|
||||
let vm_id = req.vm.id.into();
|
||||
|
||||
// import primary disk from image (scsi0)
|
||||
self.import_disk_image(ImportDiskImageRequest {
|
||||
vm_id,
|
||||
node: self.node.clone(),
|
||||
storage: req.disk.name.clone(),
|
||||
disk: "scsi0".to_string(),
|
||||
image: req.image.filename()?,
|
||||
is_ssd: matches!(req.disk.kind, DiskType::SSD),
|
||||
})
|
||||
.await?;
|
||||
|
||||
// resize disk to match template
|
||||
let j_resize = self
|
||||
.resize_disk(ResizeDiskRequest {
|
||||
node: self.node.clone(),
|
||||
vm_id,
|
||||
disk: "scsi0".to_string(),
|
||||
size: req.resources()?.disk_size.to_string(),
|
||||
})
|
||||
.await?;
|
||||
// TODO: rollback
|
||||
self.wait_for_task(&j_resize).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
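The cloud-init `ipconfig` entry built in `make_config` now derives its prefix from both the range CIDR and the gateway: the broader of the two networks (the smaller prefix number) wins, which is why the test below expects `ip=192.168.1.2/16` for a /24 range whose gateway is written as 192.168.1.1/16. A standalone sketch of that arithmetic, assuming the forked ipnetwork crate keeps the upstream API; the function name is illustrative.

```rust
use ipnetwork::IpNetwork;
use std::net::Ipv4Addr;

fn ip_config_entry(addr: Ipv4Addr, cidr: &str, gateway: &str) -> anyhow::Result<String> {
    let range: IpNetwork = cidr.parse()?;
    let range_gw: IpNetwork = gateway.parse()?;
    // take the largest network (smallest prefix length) of the two
    let prefix = range.prefix().min(range_gw.prefix());
    Ok(format!(
        "ip={},gw={}",
        IpNetwork::new(addr.into(), prefix)?,
        range_gw.ip()
    ))
}

// ip_config_entry("192.168.1.2".parse()?, "192.168.1.0/24", "192.168.1.1/16")
// => "ip=192.168.1.2/16,gw=192.168.1.1"
```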
|
||||
|
||||
#[async_trait]
|
||||
impl VmHostClient for ProxmoxClient {
|
||||
async fn get_info(&self) -> Result<VmHostInfo> {
|
||||
let nodes = self.list_nodes().await?;
|
||||
if let Some(n) = nodes.iter().find(|n| n.name == self.node) {
|
||||
let storages = self.list_storage(&n.name).await?;
|
||||
let info = VmHostInfo {
|
||||
cpu: n.max_cpu
|
||||
.context("Missing cpu count, please make sure you have Sys.Audit permission")?,
|
||||
memory: n.max_mem
|
||||
.context("Missing memory size, please make sure you have Sys.Audit permission")?,
|
||||
disks: storages
|
||||
.into_iter()
|
||||
.filter_map(|s| {
|
||||
let size = s.total
|
||||
.context("Missing disk size, please make sure you have Datastore.Audit permission")
|
||||
.ok()?;
|
||||
let used = s.used
|
||||
.context("Missing used disk, please make sure you have Datastore.Audit permission")
|
||||
.ok()?;
|
||||
|
||||
Some(VmHostDiskInfo {
|
||||
name: s.storage,
|
||||
size,
|
||||
used,
|
||||
})
|
||||
})
|
||||
.collect(),
|
||||
};
|
||||
|
||||
Ok(info)
|
||||
} else {
|
||||
bail!("Could not find node {}", self.node);
|
||||
}
|
||||
}
|
||||
|
||||
async fn download_os_image(&self, image: &VmOsImage) -> Result<()> {
|
||||
let iso_storage = self.get_iso_storage(&self.node).await?;
|
||||
let files = self.list_storage_files(&self.node, &iso_storage).await?;
|
||||
@ -496,28 +598,61 @@ impl VmHostClient for ProxmoxClient {
|
||||
.await?;
|
||||
self.wait_for_task(&t_create).await?;
|
||||
|
||||
// import primary disk from image (scsi0)
|
||||
self.import_disk_image(ImportDiskImageRequest {
|
||||
vm_id,
|
||||
node: self.node.clone(),
|
||||
storage: req.disk.name.clone(),
|
||||
disk: "scsi0".to_string(),
|
||||
image: req.image.filename()?,
|
||||
is_ssd: matches!(req.disk.kind, DiskType::SSD),
|
||||
})
|
||||
.await?;
|
||||
// import template image
|
||||
self.import_template_disk(&req).await?;
|
||||
|
||||
// resize disk to match template
|
||||
let j_resize = self
|
||||
.resize_disk(ResizeDiskRequest {
|
||||
node: self.node.clone(),
|
||||
vm_id,
|
||||
disk: "scsi0".to_string(),
|
||||
size: req.resources()?.disk_size.to_string(),
|
||||
})
|
||||
// try start, otherwise ignore error (maybe its already running)
|
||||
if let Ok(j_start) = self.start_vm(&self.node, vm_id).await {
|
||||
if let Err(e) = self.wait_for_task(&j_start).await {
|
||||
warn!("Failed to start vm: {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn delete_vm(&self, vm: &Vm) -> Result<()> {
|
||||
let vm_id: ProxmoxVmId = vm.id.into();
|
||||
|
||||
// NOT IMPLEMENTED
|
||||
//let t = self.delete_vm(&self.node, vm_id).await?;
|
||||
//self.wait_for_task(&t).await?;
|
||||
|
||||
if let Some(ssh) = &self.ssh {
|
||||
let mut ses = SshClient::new()?;
|
||||
ses.connect(
|
||||
(self.api.base().host().unwrap().to_string(), 22),
|
||||
&ssh.user,
|
||||
&ssh.key,
|
||||
)
|
||||
.await?;
|
||||
// TODO: rollback
|
||||
self.wait_for_task(&j_resize).await?;
|
||||
|
||||
let cmd = format!("/usr/sbin/qm destroy {}", vm_id,);
|
||||
let (code, rsp) = ses.execute(cmd.as_str()).await?;
|
||||
info!("{}", rsp);
|
||||
if code != 0 {
|
||||
bail!("Failed to destroy vm, exit-code {}, {}", code, rsp);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn reinstall_vm(&self, req: &FullVmInfo) -> Result<()> {
|
||||
let vm_id = req.vm.id.into();
|
||||
|
||||
// try stop, otherwise ignore error (maybe its already running)
|
||||
if let Ok(j_stop) = self.stop_vm(&self.node, vm_id).await {
|
||||
if let Err(e) = self.wait_for_task(&j_stop).await {
|
||||
warn!("Failed to stop vm: {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
// unlink the existing main disk
|
||||
self.unlink_disk(&self.node, vm_id, vec!["scsi0".to_string()], true)
|
||||
.await?;
|
||||
|
||||
// import disk from template again
|
||||
self.import_template_disk(&req).await?;
|
||||
|
||||
// try start, otherwise ignore error (maybe its already running)
|
||||
if let Ok(j_start) = self.start_vm(&self.node, vm_id).await {
|
||||
@ -585,6 +720,31 @@ impl VmHostClient for ProxmoxClient {
|
||||
.await?;
|
||||
Ok(r.into_iter().map(TimeSeriesData::from).collect())
|
||||
}
|
||||
|
||||
async fn connect_terminal(&self, vm: &Vm) -> Result<TerminalStream> {
|
||||
let vm_id: ProxmoxVmId = vm.id.into();
|
||||
|
||||
let (mut client_tx, client_rx) = channel::<Vec<u8>>(1024);
|
||||
let (server_tx, mut server_rx) = channel::<Vec<u8>>(1024);
|
||||
tokio::spawn(async move {
|
||||
// fire calls to read every 100ms
|
||||
loop {
|
||||
tokio::select! {
|
||||
Some(buf) = server_rx.recv() => {
|
||||
// echo
|
||||
client_tx.send(buf).await?;
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
info!("SSH connection terminated!");
|
||||
Ok::<(), anyhow::Error>(())
|
||||
});
|
||||
Ok(TerminalStream {
|
||||
rx: client_rx,
|
||||
tx: server_tx,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Wrap a database vm id
|
||||
@ -771,6 +931,7 @@ pub enum StorageContent {
|
||||
ISO,
|
||||
VZTmpL,
|
||||
Import,
|
||||
Snippets,
|
||||
}
|
||||
|
||||
impl FromStr for StorageContent {
|
||||
@ -784,6 +945,7 @@ impl FromStr for StorageContent {
|
||||
"iso" => Ok(StorageContent::ISO),
|
||||
"vztmpl" => Ok(StorageContent::VZTmpL),
|
||||
"import" => Ok(StorageContent::Import),
|
||||
"snippets" => Ok(StorageContent::Snippets),
|
||||
_ => Err(()),
|
||||
}
|
||||
}
|
||||
@ -794,19 +956,28 @@ pub struct NodeStorage {
|
||||
pub content: String,
|
||||
pub storage: String,
|
||||
#[serde(rename = "type")]
|
||||
pub kind: Option<StorageType>,
|
||||
#[serde(rename = "thinpool")]
|
||||
pub thin_pool: Option<String>,
|
||||
pub kind: StorageType,
|
||||
/// Available storage space in bytes
|
||||
#[serde(rename = "avial")]
|
||||
pub available: Option<u64>,
|
||||
/// Total storage space in bytes
|
||||
pub total: Option<u64>,
|
||||
/// Used storage space in bytes
|
||||
pub used: Option<u64>,
|
||||
}
|
||||
|
||||
impl NodeStorage {
|
||||
pub fn contents(&self) -> Vec<StorageContent> {
|
||||
self.content
|
||||
.split(",")
|
||||
.map_while(|s| s.parse().ok())
|
||||
.map_while(|s| StorageContent::from_str(&s).ok())
|
||||
.collect()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
pub struct NodeDisk {}
|
||||
|
||||
#[derive(Debug, Serialize)]
|
||||
pub struct DownloadUrlRequest {
|
||||
pub content: StorageContent,
|
||||
@ -977,3 +1148,49 @@ impl From<RrdDataPoint> for TimeSeriesData {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::host::tests::mock_full_vm;
|
||||
use crate::MB;
|
||||
|
||||
#[test]
|
||||
fn test_config() -> Result<()> {
|
||||
let cfg = mock_full_vm();
|
||||
let template = cfg.template.clone().unwrap();
|
||||
|
||||
let q_cfg = QemuConfig {
|
||||
machine: "q35".to_string(),
|
||||
os_type: "l26".to_string(),
|
||||
bridge: "vmbr1".to_string(),
|
||||
cpu: "kvm64".to_string(),
|
||||
kvm: true,
|
||||
arch: "x86_64".to_string(),
|
||||
};
|
||||
|
||||
let p = ProxmoxClient::new(
|
||||
"http://localhost:8006".parse()?,
|
||||
"",
|
||||
"",
|
||||
None,
|
||||
q_cfg.clone(),
|
||||
None,
|
||||
);
|
||||
|
||||
let vm = p.make_config(&cfg)?;
|
||||
assert_eq!(vm.cpu, Some(q_cfg.cpu));
|
||||
assert_eq!(vm.cores, Some(template.cpu as i32));
|
||||
assert_eq!(vm.memory, Some((template.memory / MB).to_string()));
|
||||
assert_eq!(vm.on_boot, Some(true));
|
||||
assert!(vm.net.unwrap().contains("tag=100"));
|
||||
assert_eq!(
|
||||
vm.ip_config,
|
||||
Some(
|
||||
"ip=192.168.1.2/16,gw=192.168.1.1,ip=192.168.2.2/24,gw=10.10.10.10,ip6=auto"
|
||||
.to_string()
|
||||
)
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
}
|
171 lnvps_api/src/json_api.rs (Normal file)
@ -0,0 +1,171 @@
use anyhow::{bail, Result};
use log::debug;
use reqwest::header::{HeaderMap, ACCEPT, AUTHORIZATION, CONTENT_TYPE, USER_AGENT};
use reqwest::{Client, Method, RequestBuilder, Url};
use serde::de::DeserializeOwned;
use serde::Serialize;
use std::sync::Arc;

pub trait TokenGen: Send + Sync {
    fn generate_token(
        &self,
        method: Method,
        url: &Url,
        body: Option<&str>,
        req: RequestBuilder,
    ) -> Result<RequestBuilder>;
}

#[derive(Clone)]
pub struct JsonApi {
    client: Client,
    base: Url,
    /// Custom token generator per request
    token_gen: Option<Arc<dyn TokenGen>>,
}

impl JsonApi {
    pub fn new(base: &str) -> Result<Self> {
        let mut headers = HeaderMap::new();
        headers.insert(USER_AGENT, "lnvps/1.0".parse()?);
        headers.insert(ACCEPT, "application/json; charset=utf-8".parse()?);

        let client = Client::builder().default_headers(headers).build()?;

        Ok(Self {
            client,
            base: base.parse()?,
            token_gen: None,
        })
    }

    pub fn token(base: &str, token: &str, allow_invalid_certs: bool) -> Result<Self> {
        let mut headers = HeaderMap::new();
        headers.insert(USER_AGENT, "lnvps/1.0".parse()?);
        headers.insert(AUTHORIZATION, token.parse()?);
        headers.insert(ACCEPT, "application/json; charset=utf-8".parse()?);

        let client = Client::builder()
            .danger_accept_invalid_certs(allow_invalid_certs)
            .default_headers(headers)
            .build()?;
        Ok(Self {
            client,
            base: base.parse()?,
            token_gen: None,
        })
    }

    pub fn token_gen(
        base: &str,
        allow_invalid_certs: bool,
        tg: impl TokenGen + 'static,
    ) -> Result<Self> {
        let mut headers = HeaderMap::new();
        headers.insert(USER_AGENT, "lnvps/1.0".parse()?);
        headers.insert(ACCEPT, "application/json; charset=utf-8".parse()?);

        let client = Client::builder()
            .danger_accept_invalid_certs(allow_invalid_certs)
            .default_headers(headers)
            .build()?;
        Ok(Self {
            client,
            base: base.parse()?,
            token_gen: Some(Arc::new(tg)),
        })
    }

    pub fn base(&self) -> &Url {
        &self.base
    }

    pub async fn get<T: DeserializeOwned>(&self, path: &str) -> Result<T> {
        let text = self.get_raw(path).await?;
        Ok(serde_json::from_str::<T>(&text)?)
    }

    /// Get raw string response
    pub async fn get_raw(&self, path: &str) -> Result<String> {
        debug!(">> GET {}", path);
        let url = self.base.join(path)?;
        let mut req = self.client.request(Method::GET, url.clone());
        if let Some(gen) = &self.token_gen {
            req = gen.generate_token(Method::GET, &url, None, req)?;
        }
        let req = req.build()?;
        debug!(">> HEADERS {:?}", req.headers());
        let rsp = self.client.execute(req).await?;
        let status = rsp.status();
        let text = rsp.text().await?;
        debug!("<< {}", text);
        if status.is_success() {
            Ok(text)
        } else {
            bail!("{}", status);
        }
    }

    pub async fn post<T: DeserializeOwned, R: Serialize>(&self, path: &str, body: R) -> Result<T> {
        self.req(Method::POST, path, body).await
    }

    pub async fn req<T: DeserializeOwned, R: Serialize>(
        &self,
        method: Method,
        path: &str,
        body: R,
    ) -> Result<T> {
        let body = serde_json::to_string(&body)?;
        debug!(">> {} {}: {}", method.clone(), path, &body);
        let url = self.base.join(path)?;
        let mut req = self
            .client
            .request(method.clone(), url.clone())
            .header(CONTENT_TYPE, "application/json; charset=utf-8");
        if let Some(gen) = self.token_gen.as_ref() {
            req = gen.generate_token(method.clone(), &url, Some(&body), req)?;
        }
        let req = req.body(body).build()?;
        debug!(">> HEADERS {:?}", req.headers());
        let rsp = self.client.execute(req).await?;
        let status = rsp.status();
        let text = rsp.text().await?;
        #[cfg(debug_assertions)]
        debug!("<< {}", text);
        if status.is_success() {
            Ok(serde_json::from_str(&text)?)
        } else {
            bail!("{} {}: {}: {}", method, url, status, &text);
        }
    }

    /// Make a request and only return the status code
    pub async fn req_status<R: Serialize>(
        &self,
        method: Method,
        path: &str,
        body: R,
    ) -> Result<u16> {
        let body = serde_json::to_string(&body)?;
        debug!(">> {} {}: {}", method.clone(), path, &body);
        let url = self.base.join(path)?;
        let mut req = self
            .client
            .request(method.clone(), url.clone())
            .header(CONTENT_TYPE, "application/json; charset=utf-8");
        if let Some(gen) = &self.token_gen {
            req = gen.generate_token(method.clone(), &url, Some(&body), req)?;
        }
        let rsp = req.body(body).send().await?;
        let status = rsp.status();
        let text = rsp.text().await?;
        #[cfg(debug_assertions)]
        debug!("<< {}", text);
        if status.is_success() {
            Ok(status.as_u16())
        } else {
            bail!("{} {}: {}: {}", method, url, status, &text);
        }
    }
}
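A hedged usage sketch (not in the source) of the `token` constructor and the typed `get` helper; the endpoint, response shape and crate path are illustrative only.

```rust
use lnvps_api::json_api::JsonApi; // assumed crate path
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct StatusResponse {
    status: String,
}

async fn demo() -> anyhow::Result<()> {
    // `true` tolerates self-signed certificates, as the Proxmox client does.
    let api = JsonApi::token("https://api.example.com", "Bearer my-token", true)?;
    let rsp: StatusResponse = api.get("/v1/status").await?;
    println!("remote status: {}", rsp.status);
    Ok(())
}
```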
30 lnvps_api/src/lib.rs (Normal file)
@ -0,0 +1,30 @@
pub mod api;
pub mod data_migration;
pub mod dns;
pub mod exchange;
pub mod fiat;
pub mod host;
pub mod json_api;
pub mod lightning;
pub mod nip98;
pub mod payments;
pub mod provisioner;
pub mod router;
pub mod settings;
#[cfg(feature = "proxmox")]
pub mod ssh_client;
pub mod status;
pub mod worker;

#[cfg(test)]
pub mod mocks;

#[cfg(feature = "nostr-dvm")]
pub mod dvm;

/// SATS per BTC
pub const BTC_SATS: f64 = 100_000_000.0;
pub const KB: u64 = 1024;
pub const MB: u64 = KB * 1024;
pub const GB: u64 = MB * 1024;
pub const TB: u64 = GB * 1024;
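A quick worked example of the unit constants above (illustrative only): 2 * GB = 2 * 1024^3 = 2_147_483_648 bytes of memory, and 0.001 * BTC_SATS = 100_000.0 satoshis.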
163
lnvps_api/src/lightning/bitvora.rs
Normal file
@ -0,0 +1,163 @@
|
||||
use crate::api::{WebhookMessage, WEBHOOK_BRIDGE};
|
||||
use crate::json_api::JsonApi;
|
||||
use crate::lightning::{AddInvoiceRequest, AddInvoiceResult, InvoiceUpdate, LightningNode};
|
||||
use anyhow::{anyhow, bail};
|
||||
use futures::{Stream, StreamExt};
|
||||
use hmac::{Hmac, Mac};
|
||||
use lnvps_db::async_trait;
|
||||
use log::{info, warn};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::pin::Pin;
|
||||
use tokio_stream::wrappers::BroadcastStream;
|
||||
|
||||
pub struct BitvoraNode {
|
||||
api: JsonApi,
|
||||
webhook_secret: String,
|
||||
}
|
||||
|
||||
impl BitvoraNode {
|
||||
pub fn new(api_token: &str, webhook_secret: &str) -> Self {
|
||||
let auth = format!("Bearer {}", api_token);
|
||||
Self {
|
||||
api: JsonApi::token("https://api.bitvora.com/", &auth, false).unwrap(),
|
||||
webhook_secret: webhook_secret.to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl LightningNode for BitvoraNode {
|
||||
async fn add_invoice(&self, req: AddInvoiceRequest) -> anyhow::Result<AddInvoiceResult> {
|
||||
let req = CreateInvoiceRequest {
|
||||
amount: req.amount / 1000,
|
||||
currency: "sats".to_string(),
|
||||
description: req.memo.unwrap_or_default(),
|
||||
expiry_seconds: req.expire.unwrap_or(3600) as u64,
|
||||
};
|
||||
let rsp: BitvoraResponse<CreateInvoiceResponse> = self
|
||||
.api
|
||||
.post("/v1/bitcoin/deposit/lightning-invoice", req)
|
||||
.await?;
|
||||
if rsp.status >= 400 {
|
||||
bail!(
|
||||
"API error: {} {}",
|
||||
rsp.status,
|
||||
rsp.message.unwrap_or_default()
|
||||
);
|
||||
}
|
||||
Ok(AddInvoiceResult {
|
||||
pr: rsp.data.payment_request,
|
||||
payment_hash: rsp.data.r_hash,
|
||||
external_id: Some(rsp.data.id),
|
||||
})
|
||||
}
|
||||
|
||||
async fn subscribe_invoices(
|
||||
&self,
|
||||
_from_payment_hash: Option<Vec<u8>>,
|
||||
) -> anyhow::Result<Pin<Box<dyn Stream<Item = InvoiceUpdate> + Send>>> {
|
||||
let rx = BroadcastStream::new(WEBHOOK_BRIDGE.listen());
|
||||
let secret = self.webhook_secret.clone();
|
||||
let mapped = rx.then(move |r| {
|
||||
let secret = secret.clone();
|
||||
async move {
|
||||
match r {
|
||||
Ok(r) => {
|
||||
if r.endpoint != "/api/v1/webhook/bitvora" {
|
||||
return InvoiceUpdate::Unknown;
|
||||
}
|
||||
let r_body = r.body.as_slice();
|
||||
info!("Received webhook {}", String::from_utf8_lossy(r_body));
|
||||
let body: BitvoraWebhook = match serde_json::from_slice(r_body) {
|
||||
Ok(b) => b,
|
||||
Err(e) => return InvoiceUpdate::Error(e.to_string()),
|
||||
};
|
||||
|
||||
if let Err(e) = verify_webhook(&secret, &r) {
|
||||
return InvoiceUpdate::Error(e.to_string());
|
||||
}
|
||||
|
||||
match body.event {
|
||||
BitvoraWebhookEvent::DepositLightningComplete => {
|
||||
InvoiceUpdate::Settled {
|
||||
payment_hash: None,
|
||||
external_id: Some(body.data.lightning_invoice_id),
|
||||
}
|
||||
}
|
||||
BitvoraWebhookEvent::DepositLightningFailed => {
|
||||
InvoiceUpdate::Error("Payment failed".to_string())
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("Error handling webhook: {}", e);
|
||||
InvoiceUpdate::Error(e.to_string())
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
Ok(Box::pin(mapped))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
struct CreateInvoiceRequest {
|
||||
pub amount: u64,
|
||||
pub currency: String,
|
||||
pub description: String,
|
||||
pub expiry_seconds: u64,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
struct BitvoraResponse<T> {
|
||||
pub status: isize,
|
||||
pub message: Option<String>,
|
||||
pub data: T,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
struct CreateInvoiceResponse {
|
||||
pub id: String,
|
||||
pub r_hash: String,
|
||||
pub payment_request: String,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Debug, Clone)]
|
||||
struct BitvoraWebhook {
|
||||
pub event: BitvoraWebhookEvent,
|
||||
pub data: BitvoraPayment,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Debug, Clone)]
|
||||
enum BitvoraWebhookEvent {
|
||||
#[serde(rename = "deposit.lightning.completed")]
|
||||
DepositLightningComplete,
|
||||
#[serde(rename = "deposit.lightning.failed")]
|
||||
DepositLightningFailed,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Debug, Clone)]
|
||||
struct BitvoraPayment {
|
||||
pub id: String,
|
||||
pub lightning_invoice_id: String,
|
||||
}
|
||||
|
||||
type HmacSha256 = Hmac<sha2::Sha256>;
|
||||
fn verify_webhook(secret: &str, msg: &WebhookMessage) -> anyhow::Result<()> {
|
||||
let sig = msg
|
||||
.headers
|
||||
.get("bitvora-signature")
|
||||
.ok_or_else(|| anyhow!("Missing bitvora-signature header"))?;
|
||||
|
||||
let mut mac = HmacSha256::new_from_slice(secret.as_bytes())?;
|
||||
mac.update(msg.body.as_slice());
|
||||
let result = mac.finalize().into_bytes();
|
||||
|
||||
if hex::encode(result) == *sig {
|
||||
return Ok(());
|
||||
} else {
|
||||
warn!("Invalid signature found {} != {}", sig, hex::encode(result));
|
||||
}
|
||||
|
||||
bail!("No valid signature found!");
|
||||
}
|
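For reference, a small sketch of how a sender would produce a signature that the verify_webhook above accepts: a hex-encoded HMAC-SHA256 of the raw request body under the shared webhook secret. The secret and body values are placeholders, and the hmac, sha2 and hex crates used in the file are assumed to be available:

use hmac::{Hmac, Mac};
use sha2::Sha256;

fn sign_webhook_body(secret: &str, body: &[u8]) -> String {
    // Same construction as verify_webhook: HMAC-SHA256 over the raw body, hex encoded.
    let mut mac = Hmac::<Sha256>::new_from_slice(secret.as_bytes())
        .expect("HMAC accepts keys of any length");
    mac.update(body);
    hex::encode(mac.finalize().into_bytes())
}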
@ -40,6 +40,7 @@ impl LightningNode for LndNode {
|
||||
Ok(AddInvoiceResult {
|
||||
pr: inner.payment_request,
|
||||
payment_hash: hex::encode(inner.r_hash),
|
||||
external_id: None,
|
||||
})
|
||||
}
|
||||
|
||||
@ -78,8 +79,8 @@ impl LightningNode for LndNode {
|
||||
Ok(m) => {
|
||||
if m.state == InvoiceState::Settled as i32 {
|
||||
InvoiceUpdate::Settled {
|
||||
settle_index: m.settle_index,
|
||||
payment_hash: hex::encode(m.r_hash),
|
||||
payment_hash: Some(hex::encode(m.r_hash)),
|
||||
external_id: None,
|
||||
}
|
||||
} else {
|
||||
InvoiceUpdate::Unknown
|
@ -31,6 +31,7 @@ pub struct AddInvoiceRequest {
|
||||
pub struct AddInvoiceResult {
|
||||
pub pr: String,
|
||||
pub payment_hash: String,
|
||||
pub external_id: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
@ -39,8 +40,8 @@ pub enum InvoiceUpdate {
|
||||
Unknown,
|
||||
Error(String),
|
||||
Settled {
|
||||
payment_hash: String,
|
||||
settle_index: u64,
|
||||
payment_hash: Option<String>,
|
||||
external_id: Option<String>,
|
||||
},
|
||||
}
|
||||
|
@ -1,19 +1,16 @@
|
||||
#![allow(unused)]
|
||||
use crate::dns::{BasicRecord, DnsServer, RecordType};
|
||||
use crate::exchange::{ExchangeRateService, Ticker, TickerRate};
|
||||
use crate::host::{FullVmInfo, TimeSeries, TimeSeriesData, VmHostClient};
|
||||
use crate::host::{
|
||||
FullVmInfo, TerminalStream, TimeSeries, TimeSeriesData, VmHostClient, VmHostInfo,
|
||||
};
|
||||
use crate::lightning::{AddInvoiceRequest, AddInvoiceResult, InvoiceUpdate, LightningNode};
|
||||
use crate::router::{ArpEntry, Router};
|
||||
use crate::settings::NetworkPolicy;
|
||||
use crate::status::{VmRunningState, VmState};
|
||||
use anyhow::{anyhow, bail, ensure, Context};
|
||||
use chrono::{DateTime, TimeDelta, Utc};
|
||||
use fedimint_tonic_lnd::tonic::codegen::tokio_stream::Stream;
|
||||
use lnvps_db::{
|
||||
async_trait, DiskInterface, DiskType, IpRange, LNVpsDb, OsDistribution, User, UserSshKey, Vm,
|
||||
VmCostPlan, VmCostPlanIntervalType, VmCustomPricing, VmCustomPricingDisk, VmCustomTemplate,
|
||||
VmHost, VmHostDisk, VmHostKind, VmHostRegion, VmIpAssignment, VmOsImage, VmPayment, VmTemplate,
|
||||
};
|
||||
use lnvps_db::{async_trait, AccessPolicy, Company, DiskInterface, DiskType, IpRange, IpRangeAllocationMode, LNVPSNostrDb, LNVpsDb, NostrDomain, NostrDomainHandle, OsDistribution, User, UserSshKey, Vm, VmCostPlan, VmCostPlanIntervalType, VmCustomPricing, VmCustomPricingDisk, VmCustomTemplate, VmHost, VmHostDisk, VmHostKind, VmHostRegion, VmIpAssignment, VmOsImage, VmPayment, VmTemplate};
|
||||
use std::collections::HashMap;
|
||||
use std::ops::Add;
|
||||
use std::pin::Pin;
|
||||
@ -37,14 +34,11 @@ pub struct MockDb {
|
||||
pub custom_pricing_disk: Arc<Mutex<HashMap<u64, VmCustomPricingDisk>>>,
|
||||
pub custom_template: Arc<Mutex<HashMap<u64, VmCustomTemplate>>>,
|
||||
pub payments: Arc<Mutex<Vec<VmPayment>>>,
|
||||
pub router: Arc<Mutex<HashMap<u64, lnvps_db::Router>>>,
|
||||
pub access_policy: Arc<Mutex<HashMap<u64, AccessPolicy>>>,
|
||||
}
|
||||
|
||||
impl MockDb {
|
||||
pub const KB: u64 = 1024;
|
||||
pub const MB: u64 = Self::KB * 1024;
|
||||
pub const GB: u64 = Self::MB * 1024;
|
||||
pub const TB: u64 = Self::GB * 1024;
|
||||
|
||||
pub fn empty() -> MockDb {
|
||||
Self {
|
||||
..Default::default()
|
||||
@ -71,8 +65,8 @@ impl MockDb {
|
||||
created: Utc::now(),
|
||||
expires: None,
|
||||
cpu: 2,
|
||||
memory: Self::GB * 2,
|
||||
disk_size: Self::GB * 64,
|
||||
memory: crate::GB * 2,
|
||||
disk_size: crate::GB * 64,
|
||||
disk_type: DiskType::SSD,
|
||||
disk_interface: DiskInterface::PCIe,
|
||||
cost_plan_id: 1,
|
||||
@ -109,6 +103,7 @@ impl Default for MockDb {
|
||||
id: 1,
|
||||
name: "Mock".to_string(),
|
||||
enabled: true,
|
||||
company_id: None,
|
||||
},
|
||||
);
|
||||
let mut ip_ranges = HashMap::new();
|
||||
@ -120,6 +115,19 @@ impl Default for MockDb {
|
||||
gateway: "10.0.0.1/8".to_string(),
|
||||
enabled: true,
|
||||
region_id: 1,
|
||||
..Default::default()
|
||||
},
|
||||
);
|
||||
ip_ranges.insert(
|
||||
2,
|
||||
IpRange {
|
||||
id: 2,
|
||||
cidr: "fd00::/64".to_string(),
|
||||
gateway: "fd00::1".to_string(),
|
||||
enabled: true,
|
||||
region_id: 1,
|
||||
allocation_mode: IpRangeAllocationMode::SlaacEui64,
|
||||
..Default::default()
|
||||
},
|
||||
);
|
||||
let mut hosts = HashMap::new();
|
||||
@ -132,10 +140,13 @@ impl Default for MockDb {
|
||||
name: "mock-host".to_string(),
|
||||
ip: "https://localhost".to_string(),
|
||||
cpu: 4,
|
||||
memory: 8 * Self::GB,
|
||||
memory: 8 * crate::GB,
|
||||
enabled: true,
|
||||
api_token: "".to_string(),
|
||||
load_factor: 1.5,
|
||||
load_cpu: 1.5,
|
||||
load_memory: 2.0,
|
||||
load_disk: 3.0,
|
||||
vlan_id: Some(100),
|
||||
},
|
||||
);
|
||||
let mut host_disks = HashMap::new();
|
||||
@ -145,7 +156,7 @@ impl Default for MockDb {
|
||||
id: 1,
|
||||
host_id: 1,
|
||||
name: "mock-disk".to_string(),
|
||||
size: Self::TB * 10,
|
||||
size: crate::TB * 10,
|
||||
kind: DiskType::SSD,
|
||||
interface: DiskInterface::PCIe,
|
||||
enabled: true,
|
||||
@ -166,6 +177,7 @@ impl Default for MockDb {
|
||||
enabled: true,
|
||||
release_date: Utc::now(),
|
||||
url: "https://example.com/debian_12.img".to_string(),
|
||||
default_username: None,
|
||||
},
|
||||
);
|
||||
Self {
|
||||
@ -184,10 +196,63 @@ impl Default for MockDb {
|
||||
user_ssh_keys: Arc::new(Mutex::new(Default::default())),
|
||||
custom_template: Arc::new(Default::default()),
|
||||
payments: Arc::new(Default::default()),
|
||||
router: Arc::new(Default::default()),
|
||||
access_policy: Arc::new(Default::default()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl LNVPSNostrDb for MockDb {
|
||||
async fn get_handle(&self, handle_id: u64) -> anyhow::Result<NostrDomainHandle> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_handle_by_name(
|
||||
&self,
|
||||
domain_id: u64,
|
||||
handle: &str,
|
||||
) -> anyhow::Result<NostrDomainHandle> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn insert_handle(&self, handle: &NostrDomainHandle) -> anyhow::Result<u64> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn update_handle(&self, handle: &NostrDomainHandle) -> anyhow::Result<()> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn delete_handle(&self, handle_id: u64) -> anyhow::Result<()> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn list_handles(&self, domain_id: u64) -> anyhow::Result<Vec<NostrDomainHandle>> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_domain(&self, id: u64) -> anyhow::Result<NostrDomain> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_domain_by_name(&self, name: &str) -> anyhow::Result<NostrDomain> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn list_domains(&self, owner_id: u64) -> anyhow::Result<Vec<NostrDomain>> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn insert_domain(&self, domain: &NostrDomain) -> anyhow::Result<u64> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn delete_domain(&self, domain_id: u64) -> anyhow::Result<()> {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl LNVpsDb for MockDb {
|
||||
async fn migrate(&self) -> anyhow::Result<()> {
|
||||
@ -206,9 +271,8 @@ impl LNVpsDb for MockDb {
|
||||
id: max + 1,
|
||||
pubkey: pubkey.to_vec(),
|
||||
created: Utc::now(),
|
||||
email: None,
|
||||
contact_nip17: false,
|
||||
contact_email: false,
|
||||
country_code: Some("USA".to_string()),
|
||||
..Default::default()
|
||||
},
|
||||
);
|
||||
Ok(max + 1)
|
||||
@ -269,11 +333,26 @@ impl LNVpsDb for MockDb {
|
||||
.collect())
|
||||
}
|
||||
|
||||
async fn list_host_region(&self) -> anyhow::Result<Vec<VmHostRegion>> {
|
||||
let regions = self.regions.lock().await;
|
||||
Ok(regions.values().filter(|r| r.enabled).cloned().collect())
|
||||
}
|
||||
|
||||
async fn get_host_region(&self, id: u64) -> anyhow::Result<VmHostRegion> {
|
||||
let regions = self.regions.lock().await;
|
||||
Ok(regions.get(&id).ok_or(anyhow!("no region"))?.clone())
|
||||
}
|
||||
|
||||
async fn get_host_region_by_name(&self, name: &str) -> anyhow::Result<VmHostRegion> {
|
||||
let regions = self.regions.lock().await;
|
||||
Ok(regions
|
||||
.iter()
|
||||
.find(|(_, v)| v.name == name)
|
||||
.ok_or(anyhow!("no region"))?
|
||||
.1
|
||||
.clone())
|
||||
}
|
||||
|
||||
async fn list_hosts(&self) -> anyhow::Result<Vec<VmHost>> {
|
||||
let hosts = self.hosts.lock().await;
|
||||
Ok(hosts.values().filter(|h| h.enabled).cloned().collect())
|
||||
@ -304,6 +383,16 @@ impl LNVpsDb for MockDb {
|
||||
Ok(disks.get(&disk_id).ok_or(anyhow!("no disk"))?.clone())
|
||||
}
|
||||
|
||||
async fn update_host_disk(&self, disk: &VmHostDisk) -> anyhow::Result<()> {
|
||||
let mut disks = self.host_disks.lock().await;
|
||||
if let Some(d) = disks.get_mut(&disk.id) {
|
||||
d.size = disk.size;
|
||||
d.kind = disk.kind;
|
||||
d.interface = disk.interface;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn get_os_image(&self, id: u64) -> anyhow::Result<VmOsImage> {
|
||||
let os_images = self.os_images.lock().await;
|
||||
Ok(os_images.get(&id).ok_or(anyhow!("no image"))?.clone())
|
||||
@ -439,6 +528,7 @@ impl LNVpsDb for MockDb {
|
||||
let mut vms = self.vms.lock().await;
|
||||
if let Some(v) = vms.get_mut(&vm.id) {
|
||||
v.ssh_key_id = vm.ssh_key_id;
|
||||
v.mac_address = vm.mac_address.clone();
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
@ -458,7 +548,7 @@ impl LNVpsDb for MockDb {
|
||||
|
||||
async fn update_vm_ip_assignment(&self, ip_assignment: &VmIpAssignment) -> anyhow::Result<()> {
|
||||
let mut ip_assignments = self.ip_assignments.lock().await;
|
||||
if let Some(i) = ip_assignments.get_mut(&ip_assignment.vm_id) {
|
||||
if let Some(i) = ip_assignments.get_mut(&ip_assignment.id) {
|
||||
i.arp_ref = ip_assignment.arp_ref.clone();
|
||||
i.dns_forward = ip_assignment.dns_forward.clone();
|
||||
i.dns_reverse = ip_assignment.dns_reverse.clone();
|
||||
@ -518,11 +608,18 @@ impl LNVpsDb for MockDb {
|
||||
.clone())
|
||||
}
|
||||
|
||||
async fn get_vm_payment_by_ext_id(&self, id: &str) -> anyhow::Result<VmPayment> {
|
||||
let p = self.payments.lock().await;
|
||||
Ok(p.iter()
|
||||
.find(|p| p.external_id == Some(id.to_string()))
|
||||
.context("no vm_payment")?
|
||||
.clone())
|
||||
}
|
||||
|
||||
async fn update_vm_payment(&self, vm_payment: &VmPayment) -> anyhow::Result<()> {
|
||||
let mut p = self.payments.lock().await;
|
||||
if let Some(p) = p.iter_mut().find(|p| p.id == *vm_payment.id) {
|
||||
p.is_paid = vm_payment.is_paid.clone();
|
||||
p.settle_index = vm_payment.settle_index.clone();
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
@ -539,7 +636,8 @@ impl LNVpsDb for MockDb {
|
||||
async fn last_paid_invoice(&self) -> anyhow::Result<Option<VmPayment>> {
|
||||
let p = self.payments.lock().await;
|
||||
Ok(p.iter()
|
||||
.max_by(|a, b| a.settle_index.cmp(&b.settle_index))
|
||||
.filter(|p| p.is_paid)
|
||||
.max_by(|a, b| a.created.cmp(&b.created))
|
||||
.map(|v| v.clone()))
|
||||
}
|
||||
|
||||
@ -581,27 +679,45 @@ impl LNVpsDb for MockDb {
|
||||
.cloned()
|
||||
.collect())
|
||||
}
|
||||
|
||||
async fn get_router(&self, router_id: u64) -> anyhow::Result<lnvps_db::Router> {
|
||||
let r = self.router.lock().await;
|
||||
Ok(r.get(&router_id).cloned().context("no router")?)
|
||||
}
|
||||
|
||||
async fn get_access_policy(&self, access_policy_id: u64) -> anyhow::Result<AccessPolicy> {
|
||||
let p = self.access_policy.lock().await;
|
||||
Ok(p.get(&access_policy_id)
|
||||
.cloned()
|
||||
.context("no access policy")?)
|
||||
}
|
||||
|
||||
async fn get_company(&self, company_id: u64) -> anyhow::Result<Company> {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct MockRouter {
|
||||
pub policy: NetworkPolicy,
|
||||
arp: Arc<Mutex<HashMap<u64, ArpEntry>>>,
|
||||
}
|
||||
|
||||
impl MockRouter {
|
||||
pub fn new(policy: NetworkPolicy) -> Self {
|
||||
pub fn new() -> Self {
|
||||
static LAZY_ARP: LazyLock<Arc<Mutex<HashMap<u64, ArpEntry>>>> =
|
||||
LazyLock::new(|| Arc::new(Mutex::new(HashMap::new())));
|
||||
|
||||
Self {
|
||||
policy,
|
||||
arp: LAZY_ARP.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
#[async_trait]
|
||||
impl Router for MockRouter {
|
||||
async fn generate_mac(&self, ip: &str, comment: &str) -> anyhow::Result<Option<ArpEntry>> {
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
async fn list_arp_entry(&self) -> anyhow::Result<Vec<ArpEntry>> {
|
||||
let arp = self.arp.lock().await;
|
||||
Ok(arp.values().cloned().collect())
|
||||
@ -642,14 +758,15 @@ impl Router for MockRouter {
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct MockNode {
|
||||
invoices: Arc<Mutex<HashMap<String, MockInvoice>>>,
|
||||
pub invoices: Arc<Mutex<HashMap<String, MockInvoice>>>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
struct MockInvoice {
|
||||
pr: String,
|
||||
expiry: DateTime<Utc>,
|
||||
settle_index: u64,
|
||||
pub struct MockInvoice {
|
||||
pub pr: String,
|
||||
pub amount: u64,
|
||||
pub expiry: DateTime<Utc>,
|
||||
pub is_paid: bool,
|
||||
}
|
||||
|
||||
impl MockNode {
|
||||
@ -665,7 +782,23 @@ impl MockNode {
|
||||
#[async_trait]
|
||||
impl LightningNode for MockNode {
|
||||
async fn add_invoice(&self, req: AddInvoiceRequest) -> anyhow::Result<AddInvoiceResult> {
|
||||
todo!()
|
||||
let mut invoices = self.invoices.lock().await;
|
||||
let id: [u8; 32] = rand::random();
|
||||
let hex_id = hex::encode(id);
|
||||
invoices.insert(
|
||||
hex_id.clone(),
|
||||
MockInvoice {
|
||||
pr: format!("lnrt1{}", hex_id),
|
||||
amount: req.amount,
|
||||
expiry: Utc::now().add(TimeDelta::seconds(req.expire.unwrap_or(3600) as i64)),
|
||||
is_paid: false,
|
||||
},
|
||||
);
|
||||
Ok(AddInvoiceResult {
|
||||
pr: format!("lnrt1{}", hex_id),
|
||||
payment_hash: hex_id.clone(),
|
||||
external_id: None,
|
||||
})
|
||||
}
|
||||
|
||||
async fn subscribe_invoices(
|
||||
@ -698,6 +831,10 @@ impl MockVmHost {
|
||||
|
||||
#[async_trait]
|
||||
impl VmHostClient for MockVmHost {
|
||||
async fn get_info(&self) -> anyhow::Result<VmHostInfo> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn download_os_image(&self, image: &VmOsImage) -> anyhow::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
@ -747,6 +884,16 @@ impl VmHostClient for MockVmHost {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn delete_vm(&self, vm: &Vm) -> anyhow::Result<()> {
|
||||
let mut vms = self.vms.lock().await;
|
||||
vms.remove(&vm.id);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn reinstall_vm(&self, cfg: &FullVmInfo) -> anyhow::Result<()> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_vm_state(&self, vm: &Vm) -> anyhow::Result<VmState> {
|
||||
let vms = self.vms.lock().await;
|
||||
if let Some(vm) = vms.get(&vm.id) {
|
||||
@ -777,11 +924,14 @@ impl VmHostClient for MockVmHost {
|
||||
) -> anyhow::Result<Vec<TimeSeriesData>> {
|
||||
Ok(vec![])
|
||||
}
|
||||
|
||||
async fn connect_terminal(&self, vm: &Vm) -> anyhow::Result<TerminalStream> {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
pub struct MockDnsServer {
|
||||
pub forward: Arc<Mutex<HashMap<String, MockDnsEntry>>>,
|
||||
pub reverse: Arc<Mutex<HashMap<String, MockDnsEntry>>>,
|
||||
pub zones: Arc<Mutex<HashMap<String, HashMap<String, MockDnsEntry>>>>,
|
||||
}
|
||||
|
||||
pub struct MockDnsEntry {
|
||||
@ -792,25 +942,28 @@ pub struct MockDnsEntry {
|
||||
|
||||
impl MockDnsServer {
|
||||
pub fn new() -> Self {
|
||||
static LAZY_FWD: LazyLock<Arc<Mutex<HashMap<String, MockDnsEntry>>>> =
|
||||
LazyLock::new(|| Arc::new(Mutex::new(HashMap::new())));
|
||||
static LAZY_REV: LazyLock<Arc<Mutex<HashMap<String, MockDnsEntry>>>> =
|
||||
static LAZY_ZONES: LazyLock<Arc<Mutex<HashMap<String, HashMap<String, MockDnsEntry>>>>> =
|
||||
LazyLock::new(|| Arc::new(Mutex::new(HashMap::new())));
|
||||
Self {
|
||||
forward: LAZY_FWD.clone(),
|
||||
reverse: LAZY_REV.clone(),
|
||||
zones: LAZY_ZONES.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
#[async_trait]
|
||||
impl DnsServer for MockDnsServer {
|
||||
async fn add_record(&self, record: &BasicRecord) -> anyhow::Result<BasicRecord> {
|
||||
let mut table = match record.kind {
|
||||
RecordType::PTR => self.reverse.lock().await,
|
||||
_ => self.forward.lock().await,
|
||||
async fn add_record(&self, zone_id: &str, record: &BasicRecord) -> anyhow::Result<BasicRecord> {
|
||||
let mut zones = self.zones.lock().await;
|
||||
let table = if let Some(t) = zones.get_mut(zone_id) {
|
||||
t
|
||||
} else {
|
||||
zones.insert(zone_id.to_string(), HashMap::new());
|
||||
zones.get_mut(zone_id).unwrap()
|
||||
};
|
||||
|
||||
if table.values().any(|v| v.name == record.name) {
|
||||
if table
|
||||
.values()
|
||||
.any(|v| v.name == record.name && v.kind == record.kind.to_string())
|
||||
{
|
||||
bail!("Duplicate record with name {}", record.name);
|
||||
}
|
||||
|
||||
@ -835,20 +988,30 @@ impl DnsServer for MockDnsServer {
|
||||
})
|
||||
}
|
||||
|
||||
async fn delete_record(&self, record: &BasicRecord) -> anyhow::Result<()> {
|
||||
let mut table = match record.kind {
|
||||
RecordType::PTR => self.reverse.lock().await,
|
||||
_ => self.forward.lock().await,
|
||||
async fn delete_record(&self, zone_id: &str, record: &BasicRecord) -> anyhow::Result<()> {
|
||||
let mut zones = self.zones.lock().await;
|
||||
let table = if let Some(t) = zones.get_mut(zone_id) {
|
||||
t
|
||||
} else {
|
||||
zones.insert(zone_id.to_string(), HashMap::new());
|
||||
zones.get_mut(zone_id).unwrap()
|
||||
};
|
||||
ensure!(record.id.is_some(), "Id is missing");
|
||||
table.remove(record.id.as_ref().unwrap());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn update_record(&self, record: &BasicRecord) -> anyhow::Result<BasicRecord> {
|
||||
let mut table = match record.kind {
|
||||
RecordType::PTR => self.reverse.lock().await,
|
||||
_ => self.forward.lock().await,
|
||||
async fn update_record(
|
||||
&self,
|
||||
zone_id: &str,
|
||||
record: &BasicRecord,
|
||||
) -> anyhow::Result<BasicRecord> {
|
||||
let mut zones = self.zones.lock().await;
|
||||
let table = if let Some(t) = zones.get_mut(zone_id) {
|
||||
t
|
||||
} else {
|
||||
zones.insert(zone_id.to_string(), HashMap::new());
|
||||
zones.get_mut(zone_id).unwrap()
|
||||
};
|
||||
ensure!(record.id.is_some(), "Id is missing");
|
||||
if let Some(mut r) = table.get_mut(record.id.as_ref().unwrap()) {
|
@ -98,7 +98,7 @@ impl<'r> FromRequest<'r> for Nip98Auth {
        }
        let auth = Nip98Auth::from_base64(&auth[6..]).unwrap();
        match auth.check(
            request.uri().to_string().as_str(),
            request.uri().path().to_string().as_str(),
            request.method().as_str(),
        ) {
            Ok(_) => Outcome::Success(auth),
87
lnvps_api/src/payments/invoice.rs
Normal file
@ -0,0 +1,87 @@
|
||||
use crate::lightning::{InvoiceUpdate, LightningNode};
|
||||
use crate::worker::WorkJob;
|
||||
use anyhow::Result;
|
||||
use lnvps_db::{LNVpsDb, VmPayment};
|
||||
use log::{error, info, warn};
|
||||
use nostr::util::hex;
|
||||
use rocket::futures::StreamExt;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::mpsc::UnboundedSender;
|
||||
|
||||
pub struct NodeInvoiceHandler {
|
||||
node: Arc<dyn LightningNode>,
|
||||
db: Arc<dyn LNVpsDb>,
|
||||
tx: UnboundedSender<WorkJob>,
|
||||
}
|
||||
|
||||
impl NodeInvoiceHandler {
|
||||
pub fn new(
|
||||
node: Arc<dyn LightningNode>,
|
||||
db: Arc<dyn LNVpsDb>,
|
||||
tx: UnboundedSender<WorkJob>,
|
||||
) -> Self {
|
||||
Self { node, tx, db }
|
||||
}
|
||||
|
||||
async fn mark_paid(&self, id: &Vec<u8>) -> Result<()> {
|
||||
let p = self.db.get_vm_payment(id).await?;
|
||||
self.mark_payment_paid(&p).await
|
||||
}
|
||||
|
||||
async fn mark_paid_ext_id(&self, external_id: &str) -> Result<()> {
|
||||
let p = self.db.get_vm_payment_by_ext_id(external_id).await?;
|
||||
self.mark_payment_paid(&p).await
|
||||
}
|
||||
|
||||
async fn mark_payment_paid(&self, payment: &VmPayment) -> Result<()> {
|
||||
self.db.vm_payment_paid(&payment).await?;
|
||||
|
||||
info!(
|
||||
"VM payment {} for {}, paid",
|
||||
hex::encode(&payment.id),
|
||||
payment.vm_id
|
||||
);
|
||||
self.tx.send(WorkJob::CheckVm {
|
||||
vm_id: payment.vm_id,
|
||||
})?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn listen(&mut self) -> Result<()> {
|
||||
let from_ph = self.db.last_paid_invoice().await?.map(|i| i.id.clone());
|
||||
info!(
|
||||
"Listening for invoices from {}",
|
||||
from_ph
|
||||
.as_ref()
|
||||
.map(hex::encode)
|
||||
.unwrap_or("NOW".to_string())
|
||||
);
|
||||
|
||||
let mut handler = self.node.subscribe_invoices(from_ph).await?;
|
||||
while let Some(msg) = handler.next().await {
|
||||
match msg {
|
||||
InvoiceUpdate::Settled {
|
||||
payment_hash,
|
||||
external_id,
|
||||
} => {
|
||||
if let Some(h) = payment_hash {
|
||||
let r_hash = hex::decode(h)?;
|
||||
if let Err(e) = self.mark_paid(&r_hash).await {
|
||||
error!("{}", e);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
if let Some(e) = external_id {
|
||||
if let Err(e) = self.mark_paid_ext_id(&e).await {
|
||||
error!("{}", e);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
}
|
||||
v => warn!("Unknown invoice update: {:?}", v),
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
55
lnvps_api/src/payments/mod.rs
Normal file
@ -0,0 +1,55 @@
use crate::lightning::LightningNode;
use crate::payments::invoice::NodeInvoiceHandler;
use crate::settings::Settings;
use crate::worker::WorkJob;
use anyhow::Result;
use lnvps_db::LNVpsDb;
use log::error;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::mpsc::UnboundedSender;
use tokio::time::sleep;

mod invoice;
#[cfg(feature = "revolut")]
mod revolut;

pub fn listen_all_payments(
    settings: &Settings,
    node: Arc<dyn LightningNode>,
    db: Arc<dyn LNVpsDb>,
    sender: UnboundedSender<WorkJob>,
) -> Result<()> {
    let mut handler = NodeInvoiceHandler::new(node.clone(), db.clone(), sender.clone());
    tokio::spawn(async move {
        loop {
            if let Err(e) = handler.listen().await {
                error!("invoice-error: {}", e);
            }
            sleep(Duration::from_secs(30)).await;
        }
    });

    #[cfg(feature = "revolut")]
    {
        use crate::payments::revolut::RevolutPaymentHandler;
        if let Some(r) = &settings.revolut {
            let mut handler = RevolutPaymentHandler::new(
                r.clone(),
                &settings.public_url,
                db.clone(),
                sender.clone(),
            )?;
            tokio::spawn(async move {
                loop {
                    if let Err(e) = handler.listen().await {
                        error!("revolut-error: {}", e);
                    }
                    sleep(Duration::from_secs(30)).await;
                }
            });
        }
    }

    Ok(())
}
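A rough sketch of how listen_all_payments might be wired up at startup; it must run inside the Tokio runtime since it spawns tasks. The settings, node and db values are assumed to come from the application's existing bootstrap code, and the WorkJob receiver would normally be handed to the background worker elsewhere:

use tokio::sync::mpsc::unbounded_channel;

fn start_payment_listeners(
    settings: &Settings,
    node: Arc<dyn LightningNode>,
    db: Arc<dyn LNVpsDb>,
) -> Result<()> {
    // Receiver side is dropped here only for illustration.
    let (tx, _rx) = unbounded_channel::<WorkJob>();
    listen_all_payments(settings, node, db, tx)
}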
156
lnvps_api/src/payments/revolut.rs
Normal file
@ -0,0 +1,156 @@
|
||||
use crate::api::{WebhookMessage, WEBHOOK_BRIDGE};
|
||||
use crate::fiat::{RevolutApi, RevolutWebhookEvent};
|
||||
use crate::settings::RevolutConfig;
|
||||
use crate::worker::WorkJob;
|
||||
use anyhow::{anyhow, bail, Context, Result};
|
||||
use hmac::{Hmac, Mac};
|
||||
use isocountry::CountryCode;
|
||||
use lnvps_db::LNVpsDb;
|
||||
use log::{error, info, warn};
|
||||
use reqwest::Url;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::mpsc::UnboundedSender;
|
||||
|
||||
pub struct RevolutPaymentHandler {
|
||||
api: RevolutApi,
|
||||
db: Arc<dyn LNVpsDb>,
|
||||
sender: UnboundedSender<WorkJob>,
|
||||
public_url: String,
|
||||
}
|
||||
|
||||
impl RevolutPaymentHandler {
|
||||
pub fn new(
|
||||
settings: RevolutConfig,
|
||||
public_url: &str,
|
||||
db: Arc<dyn LNVpsDb>,
|
||||
sender: UnboundedSender<WorkJob>,
|
||||
) -> Result<Self> {
|
||||
Ok(Self {
|
||||
api: RevolutApi::new(settings)?,
|
||||
public_url: public_url.to_string(),
|
||||
db,
|
||||
sender,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn listen(&mut self) -> Result<()> {
|
||||
let this_webhook = Url::parse(&self.public_url)?.join("/api/v1/webhook/revolut")?;
|
||||
let webhooks = self.api.list_webhooks().await?;
|
||||
for wh in webhooks {
|
||||
info!("Deleting old webhook: {} {}", wh.id, wh.url);
|
||||
self.api.delete_webhook(&wh.id).await?
|
||||
}
|
||||
info!("Setting up webhook for '{}'", this_webhook);
|
||||
let wh = self
|
||||
.api
|
||||
.create_webhook(
|
||||
this_webhook.as_str(),
|
||||
vec![
|
||||
RevolutWebhookEvent::OrderCompleted,
|
||||
RevolutWebhookEvent::OrderAuthorised,
|
||||
],
|
||||
)
|
||||
.await?;
|
||||
|
||||
let secret = wh.signing_secret.context("Signing secret is missing")?;
|
||||
// listen to events
|
||||
let mut listener = WEBHOOK_BRIDGE.listen();
while let Ok(m) = listener.recv().await {
|
||||
if m.endpoint != "/api/v1/webhook/revolut" {
|
||||
continue;
|
||||
}
|
||||
let body: RevolutWebhook = serde_json::from_slice(m.body.as_slice())?;
|
||||
info!("Received webhook {:?}", body);
|
||||
if let Err(e) = verify_webhook(&secret, &m) {
|
||||
error!("Signature verification failed: {}", e);
|
||||
continue;
|
||||
}
|
||||
|
||||
if let RevolutWebhookEvent::OrderCompleted = body.event {
|
||||
if let Err(e) = self.try_complete_payment(&body.order_id).await {
|
||||
error!("Failed to complete order: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn try_complete_payment(&self, ext_id: &str) -> Result<()> {
|
||||
let mut p = self.db.get_vm_payment_by_ext_id(ext_id).await?;
|
||||
|
||||
// save payment state json into external_data
|
||||
// TODO: encrypt payment_data
|
||||
let order = self.api.get_order(ext_id).await?;
|
||||
p.external_data = serde_json::to_string(&order)?;
|
||||
|
||||
// check user country matches card country
|
||||
if let Some(cc) = order
|
||||
.payments
|
||||
.and_then(|p| p.first().cloned())
|
||||
.and_then(|p| p.payment_method)
|
||||
.and_then(|p| p.card_country_code)
|
||||
.and_then(|c| CountryCode::for_alpha2(&c).ok())
|
||||
{
|
||||
let vm = self.db.get_vm(p.vm_id).await?;
|
||||
let mut user = self.db.get_user(vm.user_id).await?;
|
||||
if user.country_code.is_none() {
|
||||
// update user country code to match card country
|
||||
user.country_code = Some(cc.alpha3().to_string());
|
||||
self.db.update_user(&user).await?;
|
||||
}
|
||||
}
|
||||
|
||||
self.db.vm_payment_paid(&p).await?;
|
||||
self.sender.send(WorkJob::CheckVm { vm_id: p.vm_id })?;
|
||||
info!("VM payment {} for {}, paid", hex::encode(p.id), p.vm_id);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
type HmacSha256 = Hmac<sha2::Sha256>;
|
||||
fn verify_webhook(secret: &str, msg: &WebhookMessage) -> Result<()> {
|
||||
let sig = msg
|
||||
.headers
|
||||
.get("revolut-signature")
|
||||
.ok_or_else(|| anyhow!("Missing Revolut-Signature header"))?;
|
||||
let timestamp = msg
|
||||
.headers
|
||||
.get("revolut-request-timestamp")
|
||||
.ok_or_else(|| anyhow!("Missing Revolut-Request-Timestamp header"))?;
|
||||
|
||||
// check if any signatures match
|
||||
for sig in sig.split(",") {
|
||||
let mut sig_split = sig.split("=");
|
||||
let (version, code) = (
|
||||
sig_split.next().context("Invalid signature format")?,
|
||||
sig_split.next().context("Invalid signature format")?,
|
||||
);
|
||||
let mut mac = HmacSha256::new_from_slice(secret.as_bytes())?;
|
||||
mac.update(version.as_bytes());
|
||||
mac.update(b".");
|
||||
mac.update(timestamp.as_bytes());
|
||||
mac.update(b".");
|
||||
mac.update(msg.body.as_slice());
|
||||
let result = mac.finalize().into_bytes();
|
||||
|
||||
if hex::encode(result) == code {
|
||||
return Ok(());
|
||||
} else {
|
||||
warn!(
|
||||
"Invalid signature found {} != {}",
|
||||
code,
|
||||
hex::encode(result)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
bail!("No valid signature found!");
|
||||
}
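// Illustrative example of the headers this check expects (all values are placeholders):
//   Revolut-Request-Timestamp: 1683650202360
//   Revolut-Signature: v1=<hex hmac digest>
// Per the loop above, the signed payload is "{version}.{timestamp}.{raw body}",
// HMAC-SHA256'd with the webhook signing secret and hex encoded; any one matching
// signature in the comma-separated header value is accepted.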
|
||||
|
||||
#[derive(Clone, Debug, Deserialize, Serialize)]
|
||||
struct RevolutWebhook {
|
||||
pub event: RevolutWebhookEvent,
|
||||
pub order_id: String,
|
||||
pub merchant_order_ext_ref: Option<String>,
|
||||
}
|
@ -2,8 +2,10 @@ use crate::provisioner::Template;
|
||||
use anyhow::{bail, Result};
|
||||
use chrono::Utc;
|
||||
use futures::future::join_all;
|
||||
use ipnetwork::{IpNetwork, NetworkSize};
|
||||
use lnvps_db::{
|
||||
DiskInterface, DiskType, LNVpsDb, VmCustomTemplate, VmHost, VmHostDisk, VmTemplate,
|
||||
DiskInterface, DiskType, IpRange, LNVpsDb, VmCustomTemplate, VmHost, VmHostDisk,
|
||||
VmIpAssignment, VmTemplate,
|
||||
};
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
@ -80,8 +82,25 @@ impl HostCapacityService {
|
||||
disk_interface: Option<DiskInterface>,
|
||||
) -> Result<HostCapacity> {
|
||||
let vms = self.db.list_vms_on_host(host.id).await?;
|
||||
|
||||
// load ip ranges
|
||||
let ip_ranges = self.db.list_ip_range_in_region(host.region_id).await?;
|
||||
// TODO: handle very large number of assignments, maybe just count assignments
|
||||
let ip_range_assigned: Vec<VmIpAssignment> = join_all(
|
||||
ip_ranges
|
||||
.iter()
|
||||
.map(|r| self.db.list_vm_ip_assignments_in_range(r.id)),
|
||||
)
|
||||
.await
|
||||
.into_iter()
|
||||
.filter_map(|r| r.ok())
|
||||
.flatten()
|
||||
.collect();
|
||||
|
||||
// TODO: filter disks from DB? Should be very few disks anyway
|
||||
let storage = self.db.list_host_disks(host.id).await?;
|
||||
|
||||
// load templates
|
||||
let templates = self.db.list_vm_templates().await?;
|
||||
let custom_templates: Vec<Result<VmCustomTemplate>> = join_all(
|
||||
vms.iter()
|
||||
@ -145,10 +164,10 @@ impl HostCapacityService {
|
||||
.map(|s| {
|
||||
let usage = vm_resources
|
||||
.iter()
|
||||
.filter(|(k, v)| s.id == v.disk_id)
|
||||
.fold(0, |acc, (k, v)| acc + v.disk);
|
||||
.filter(|(_k, v)| s.id == v.disk_id)
|
||||
.fold(0, |acc, (_k, v)| acc + v.disk);
|
||||
DiskCapacity {
|
||||
load_factor: host.load_factor,
|
||||
load_factor: host.load_disk,
|
||||
disk: s.clone(),
|
||||
usage,
|
||||
}
|
||||
@ -161,19 +180,40 @@ impl HostCapacityService {
|
||||
let memory_consumed = vm_resources.values().fold(0, |acc, vm| acc + vm.memory);
|
||||
|
||||
Ok(HostCapacity {
|
||||
load_factor: host.load_factor,
|
||||
load_factor: LoadFactors {
|
||||
cpu: host.load_cpu,
|
||||
memory: host.load_memory,
|
||||
disk: host.load_disk,
|
||||
},
|
||||
host: host.clone(),
|
||||
cpu: cpu_consumed,
|
||||
memory: memory_consumed,
|
||||
disks: storage_disks,
|
||||
ranges: ip_ranges
|
||||
.into_iter()
|
||||
.map(|r| IPRangeCapacity {
|
||||
usage: ip_range_assigned
|
||||
.iter()
|
||||
.filter(|z| z.ip_range_id == r.id)
|
||||
.count() as u128,
|
||||
range: r,
|
||||
})
|
||||
.collect(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct LoadFactors {
|
||||
pub cpu: f32,
|
||||
pub memory: f32,
|
||||
pub disk: f32,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct HostCapacity {
|
||||
/// Load factor applied to resource consumption
|
||||
pub load_factor: f32,
|
||||
pub load_factor: LoadFactors,
|
||||
/// The host
|
||||
pub host: VmHost,
|
||||
/// Number of consumed CPU cores
|
||||
@ -182,6 +222,8 @@ pub struct HostCapacity {
|
||||
pub memory: u64,
|
||||
/// List of disks on the host and its used space
|
||||
pub disks: Vec<DiskCapacity>,
|
||||
/// List of IP ranges and its usage
|
||||
pub ranges: Vec<IPRangeCapacity>,
|
||||
}
|
||||
|
||||
impl HostCapacity {
|
||||
@ -192,23 +234,24 @@ impl HostCapacity {
|
||||
|
||||
/// CPU usage as a percentage
|
||||
pub fn cpu_load(&self) -> f32 {
|
||||
self.cpu as f32 / (self.host.cpu as f32 * self.load_factor)
|
||||
self.cpu as f32 / (self.host.cpu as f32 * self.load_factor.cpu)
|
||||
}
|
||||
|
||||
/// Total number of available CPUs
|
||||
pub fn available_cpu(&self) -> u16 {
|
||||
let loaded_host_cpu = (self.host.cpu as f32 * self.load_factor).floor() as u16;
|
||||
let loaded_host_cpu = (self.host.cpu as f32 * self.load_factor.cpu).floor() as u16;
|
||||
loaded_host_cpu.saturating_sub(self.cpu)
|
||||
}
|
||||
|
||||
/// Memory usage as a percentage
|
||||
pub fn memory_load(&self) -> f32 {
|
||||
self.memory as f32 / (self.host.memory as f32 * self.load_factor)
|
||||
self.memory as f32 / (self.host.memory as f32 * self.load_factor.memory)
|
||||
}
|
||||
|
||||
/// Total available bytes of memory
|
||||
pub fn available_memory(&self) -> u64 {
|
||||
let loaded_host_memory = (self.host.memory as f64 * self.load_factor as f64).floor() as u64;
|
||||
let loaded_host_memory =
|
||||
(self.host.memory as f64 * self.load_factor.memory as f64).floor() as u64;
|
||||
loaded_host_memory.saturating_sub(self.memory)
|
||||
}
|
||||
|
||||
@ -225,6 +268,7 @@ impl HostCapacity {
|
||||
.disks
|
||||
.iter()
|
||||
.any(|d| d.available_capacity() >= template.disk_size())
|
||||
&& self.ranges.iter().any(|r| r.available_capacity() >= 1)
|
||||
}
|
||||
}
|
||||
|
||||
@ -251,6 +295,31 @@ impl DiskCapacity {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct IPRangeCapacity {
|
||||
/// IP Range
|
||||
pub range: IpRange,
|
||||
/// Number of allocated IPs
|
||||
pub usage: u128,
|
||||
}
|
||||
|
||||
impl IPRangeCapacity {
|
||||
/// Total number of IPs free
|
||||
pub fn available_capacity(&self) -> u128 {
|
||||
let net: IpNetwork = self.range.cidr.parse().unwrap();
|
||||
|
||||
match net.size() {
|
||||
NetworkSize::V4(s) => (s as u128).saturating_sub(self.usage),
|
||||
NetworkSize::V6(s) => s.saturating_sub(self.usage),
|
||||
}
|
||||
.saturating_sub(if self.range.use_full_range {
|
||||
1 // gw
|
||||
} else {
|
||||
3 // first/last/gw
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
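// Worked example for available_capacity() above (illustrative): a /24 range holds
// 256 addresses; with use_full_range = false the first, last and gateway addresses
// are reserved, so 256 - 3 = 253 are assignable. With 69 addresses already in use
// that leaves 256 - 3 - 69 = 184 available, which is what the test below asserts.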
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
@ -259,7 +328,11 @@ mod tests {
|
||||
#[test]
|
||||
fn loads() {
|
||||
let cap = HostCapacity {
|
||||
load_factor: 2.0,
|
||||
load_factor: LoadFactors {
|
||||
cpu: 2.0,
|
||||
memory: 3.0,
|
||||
disk: 4.0,
|
||||
},
|
||||
host: VmHost {
|
||||
cpu: 100,
|
||||
memory: 100,
|
||||
@ -268,23 +341,41 @@ mod tests {
|
||||
cpu: 8,
|
||||
memory: 8,
|
||||
disks: vec![DiskCapacity {
|
||||
load_factor: 2.0,
|
||||
load_factor: 4.0,
|
||||
disk: VmHostDisk {
|
||||
size: 100,
|
||||
..Default::default()
|
||||
},
|
||||
usage: 8,
|
||||
}],
|
||||
ranges: vec![IPRangeCapacity {
|
||||
range: IpRange {
|
||||
id: 1,
|
||||
cidr: "10.0.0.0/24".to_string(),
|
||||
gateway: "10.0.0.1".to_string(),
|
||||
enabled: true,
|
||||
region_id: 1,
|
||||
..Default::default()
|
||||
},
|
||||
usage: 69,
|
||||
}],
|
||||
};
|
||||
|
||||
// load factor halves load values 8/100 * (1/load_factor)
|
||||
assert_eq!(cap.load(), 0.04);
|
||||
assert_eq!(cap.cpu_load(), 0.04);
|
||||
assert_eq!(cap.memory_load(), 0.04);
|
||||
assert_eq!(cap.disk_load(), 0.04);
|
||||
// load factor doubles memory to 200, 200 - 8
|
||||
assert_eq!(cap.available_memory(), 192);
|
||||
assert_eq!(cap.cpu_load(), 8.0 / 200.0);
|
||||
assert_eq!(cap.memory_load(), 8.0 / 300.0);
|
||||
assert_eq!(cap.disk_load(), 8.0 / 400.0);
|
||||
assert_eq!(
|
||||
cap.load(),
|
||||
((8.0 / 200.0) + (8.0 / 300.0) + (8.0 / 400.0)) / 3.0
|
||||
);
|
||||
// load factor doubles memory to 300, 300 - 8
|
||||
assert_eq!(cap.available_memory(), 292);
|
||||
assert_eq!(cap.available_cpu(), 192);
|
||||
for r in cap.ranges {
|
||||
assert_eq!(r.usage, 69);
|
||||
assert_eq!(r.available_capacity(), 256 - 3 - 69);
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
858
lnvps_api/src/provisioner/lnvps.rs
Normal file
@ -0,0 +1,858 @@
|
||||
use crate::dns::{BasicRecord, DnsServer};
|
||||
use crate::exchange::{Currency, CurrencyAmount, ExchangeRateService};
|
||||
use crate::fiat::FiatPaymentService;
|
||||
use crate::host::{get_host_client, FullVmInfo};
|
||||
use crate::lightning::{AddInvoiceRequest, LightningNode};
|
||||
use crate::provisioner::{
|
||||
AvailableIp, CostResult, HostCapacityService, NetworkProvisioner, PricingEngine,
|
||||
};
|
||||
use crate::router::{ArpEntry, MikrotikRouter, OvhDedicatedServerVMacRouter, Router};
|
||||
use crate::settings::{ProvisionerConfig, Settings};
|
||||
use anyhow::{bail, ensure, Context, Result};
|
||||
use chrono::Utc;
|
||||
use ipnetwork::IpNetwork;
|
||||
use isocountry::CountryCode;
|
||||
use lnvps_db::{
|
||||
AccessPolicy, IpRangeAllocationMode, LNVpsDb, NetworkAccessPolicy, PaymentMethod, RouterKind,
|
||||
Vm, VmCustomTemplate, VmHost, VmIpAssignment, VmPayment,
|
||||
};
|
||||
use log::{info, warn};
|
||||
use nostr::util::hex;
|
||||
use std::collections::HashMap;
|
||||
use std::ops::Add;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
/// Main provisioner class for LNVPS
|
||||
///
|
||||
/// Does all the hard work and logic for creating and expiring VMs
|
||||
pub struct LNVpsProvisioner {
|
||||
read_only: bool,
|
||||
|
||||
db: Arc<dyn LNVpsDb>,
|
||||
node: Arc<dyn LightningNode>,
|
||||
rates: Arc<dyn ExchangeRateService>,
|
||||
tax_rates: HashMap<CountryCode, f32>,
|
||||
|
||||
dns: Option<Arc<dyn DnsServer>>,
|
||||
revolut: Option<Arc<dyn FiatPaymentService>>,
|
||||
|
||||
/// Forward zone ID used for all VMs
|
||||
/// passed to the DNSServer type
|
||||
forward_zone_id: Option<String>,
|
||||
provisioner_config: ProvisionerConfig,
|
||||
}
|
||||
|
||||
impl LNVpsProvisioner {
|
||||
pub fn new(
|
||||
settings: Settings,
|
||||
db: Arc<dyn LNVpsDb>,
|
||||
node: Arc<dyn LightningNode>,
|
||||
rates: Arc<dyn ExchangeRateService>,
|
||||
) -> Self {
|
||||
Self {
|
||||
db,
|
||||
node,
|
||||
rates,
|
||||
dns: settings.get_dns().expect("dns config"),
|
||||
revolut: settings.get_revolut().expect("revolut config"),
|
||||
tax_rates: settings.tax_rate,
|
||||
provisioner_config: settings.provisioner,
|
||||
read_only: settings.read_only,
|
||||
forward_zone_id: settings.dns.map(|z| z.forward_zone_id),
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_router(&self, router_id: u64) -> Result<Arc<dyn Router>> {
|
||||
#[cfg(test)]
|
||||
return Ok(Arc::new(crate::mocks::MockRouter::new()));
|
||||
|
||||
let cfg = self.db.get_router(router_id).await?;
|
||||
match cfg.kind {
|
||||
RouterKind::Mikrotik => {
|
||||
let mut t_split = cfg.token.split(":");
|
||||
let (username, password) = (
|
||||
t_split.next().context("Invalid username:password")?,
|
||||
t_split.next().context("Invalid username:password")?,
|
||||
);
|
||||
Ok(Arc::new(MikrotikRouter::new(&cfg.url, username, password)))
|
||||
}
|
||||
RouterKind::OvhAdditionalIp => Ok(Arc::new(
|
||||
OvhDedicatedServerVMacRouter::new(&cfg.url, &cfg.name, &cfg.token).await?,
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create or Update access policy for a given ip assignment, does not save to database!
|
||||
pub async fn update_access_policy(
|
||||
&self,
|
||||
assignment: &mut VmIpAssignment,
|
||||
policy: &AccessPolicy,
|
||||
) -> Result<()> {
|
||||
let ip = IpNetwork::from_str(&assignment.ip)?;
|
||||
if matches!(policy.kind, NetworkAccessPolicy::StaticArp) && ip.is_ipv4() {
|
||||
let router = self
|
||||
.get_router(
|
||||
policy
|
||||
.router_id
|
||||
.context("Cannot apply static arp policy with no router")?,
|
||||
)
|
||||
.await?;
|
||||
let vm = self.db.get_vm(assignment.vm_id).await?;
|
||||
let entry = ArpEntry::new(&vm, assignment, policy.interface.clone())?;
|
||||
let arp = if let Some(_id) = &assignment.arp_ref {
|
||||
router.update_arp_entry(&entry).await?
|
||||
} else {
|
||||
router.add_arp_entry(&entry).await?
|
||||
};
|
||||
ensure!(arp.id.is_some(), "ARP id was empty");
|
||||
assignment.arp_ref = arp.id;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Remove an access policy for a given ip assignment, does not save to database!
|
||||
pub async fn remove_access_policy(
|
||||
&self,
|
||||
assignment: &mut VmIpAssignment,
|
||||
policy: &AccessPolicy,
|
||||
) -> Result<()> {
|
||||
let ip = IpNetwork::from_str(&assignment.ip)?;
|
||||
if matches!(policy.kind, NetworkAccessPolicy::StaticArp) && ip.is_ipv4() {
|
||||
let router = self
|
||||
.get_router(
|
||||
policy
|
||||
.router_id
|
||||
.context("Cannot apply static arp policy with no router")?,
|
||||
)
|
||||
.await?;
|
||||
let id = if let Some(id) = &assignment.arp_ref {
|
||||
Some(id.clone())
|
||||
} else {
|
||||
warn!("ARP REF not found, using arp list");
|
||||
|
||||
let ent = router.list_arp_entry().await?;
|
||||
if let Some(ent) = ent.iter().find(|e| e.address == assignment.ip) {
|
||||
ent.id.clone()
|
||||
} else {
|
||||
warn!("ARP entry not found, skipping");
|
||||
None
|
||||
}
|
||||
};
|
||||
|
||||
if let Some(id) = id {
|
||||
if let Err(e) = router.remove_arp_entry(&id).await {
|
||||
warn!("Failed to remove arp entry, skipping: {}", e);
|
||||
}
|
||||
}
|
||||
assignment.arp_ref = None;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Delete DNS on the dns server, does not save to database!
|
||||
pub async fn remove_ip_dns(&self, assignment: &mut VmIpAssignment) -> Result<()> {
|
||||
// Delete forward/reverse dns
|
||||
if let Some(dns) = &self.dns {
|
||||
let range = self.db.get_ip_range(assignment.ip_range_id).await?;
|
||||
|
||||
if let (Some(z), Some(_ref)) = (&range.reverse_zone_id, &assignment.dns_reverse_ref) {
|
||||
let rev = BasicRecord::reverse(assignment)?;
|
||||
if let Err(e) = dns.delete_record(z, &rev).await {
|
||||
warn!("Failed to delete reverse record: {}", e);
|
||||
}
|
||||
assignment.dns_reverse_ref = None;
|
||||
assignment.dns_reverse = None;
|
||||
}
|
||||
if let (Some(z), Some(_ref)) = (&self.forward_zone_id, &assignment.dns_forward_ref) {
|
||||
let rev = BasicRecord::forward(assignment)?;
|
||||
if let Err(e) = dns.delete_record(z, &rev).await {
|
||||
warn!("Failed to delete forward record: {}", e);
|
||||
}
|
||||
assignment.dns_forward_ref = None;
|
||||
assignment.dns_forward = None;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Update DNS on the dns server, does not save to database!
|
||||
pub async fn update_forward_ip_dns(&self, assignment: &mut VmIpAssignment) -> Result<()> {
|
||||
if let (Some(z), Some(dns)) = (&self.forward_zone_id, &self.dns) {
|
||||
let fwd = BasicRecord::forward(assignment)?;
|
||||
let ret_fwd = if fwd.id.is_some() {
|
||||
dns.update_record(z, &fwd).await?
|
||||
} else {
|
||||
dns.add_record(z, &fwd).await?
|
||||
};
|
||||
assignment.dns_forward = Some(ret_fwd.name);
|
||||
assignment.dns_forward_ref = Some(ret_fwd.id.context("Record id is missing")?);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Update DNS on the dns server, does not save to database!
|
||||
pub async fn update_reverse_ip_dns(&self, assignment: &mut VmIpAssignment) -> Result<()> {
|
||||
if let Some(dns) = &self.dns {
|
||||
let range = self.db.get_ip_range(assignment.ip_range_id).await?;
|
||||
if let Some(z) = &range.reverse_zone_id {
|
||||
let ret_rev = if assignment.dns_reverse_ref.is_some() {
|
||||
dns.update_record(z, &BasicRecord::reverse(assignment)?)
|
||||
.await?
|
||||
} else {
|
||||
dns.add_record(z, &BasicRecord::reverse_to_fwd(assignment)?)
|
||||
.await?
|
||||
};
|
||||
assignment.dns_reverse = Some(ret_rev.value);
|
||||
assignment.dns_reverse_ref = Some(ret_rev.id.context("Record id is missing")?);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Delete all ip assignments for a given vm
|
||||
pub async fn delete_ip_assignments(&self, vm_id: u64) -> Result<()> {
|
||||
let ips = self.db.list_vm_ip_assignments(vm_id).await?;
|
||||
for mut ip in ips {
|
||||
// load range info to check access policy
|
||||
let range = self.db.get_ip_range(ip.ip_range_id).await?;
|
||||
if let Some(ap) = range.access_policy_id {
|
||||
let ap = self.db.get_access_policy(ap).await?;
|
||||
// remove access policy
|
||||
self.remove_access_policy(&mut ip, &ap).await?;
|
||||
}
|
||||
// remove dns
|
||||
self.remove_ip_dns(&mut ip).await?;
|
||||
// save arp/dns changes
|
||||
self.db.update_vm_ip_assignment(&ip).await?;
|
||||
}
|
||||
// mark as deleted
|
||||
self.db.delete_vm_ip_assignment(vm_id).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn save_ip_assignment(&self, assignment: &mut VmIpAssignment) -> Result<()> {
|
||||
// load range info to check access policy
|
||||
let range = self.db.get_ip_range(assignment.ip_range_id).await?;
|
||||
if let Some(ap) = range.access_policy_id {
|
||||
let ap = self.db.get_access_policy(ap).await?;
|
||||
// apply access policy
|
||||
self.update_access_policy(assignment, &ap).await?;
|
||||
}
|
||||
|
||||
// Add DNS records
|
||||
self.update_forward_ip_dns(assignment).await?;
|
||||
self.update_reverse_ip_dns(assignment).await?;
|
||||
|
||||
// save to db
|
||||
self.db.insert_vm_ip_assignment(assignment).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn get_mac_for_assignment(
|
||||
&self,
|
||||
host: &VmHost,
|
||||
vm: &Vm,
|
||||
assignment: &VmIpAssignment,
|
||||
) -> Result<ArpEntry> {
|
||||
let range = self.db.get_ip_range(assignment.ip_range_id).await?;
|
||||
|
||||
// ask router first if it wants to set the MAC
|
||||
if let Some(ap) = range.access_policy_id {
|
||||
let ap = self.db.get_access_policy(ap).await?;
|
||||
if let Some(rid) = ap.router_id {
|
||||
let router = self.get_router(rid).await?;
|
||||
|
||||
if let Some(mac) = router
|
||||
.generate_mac(&assignment.ip, &format!("VM{}", assignment.vm_id))
|
||||
.await?
|
||||
{
|
||||
return Ok(mac);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ask the host next to generate the mac
|
||||
let client = get_host_client(host, &self.provisioner_config)?;
|
||||
let mac = client.generate_mac(vm).await?;
|
||||
Ok(ArpEntry {
|
||||
id: None,
|
||||
address: assignment.ip.clone(),
|
||||
mac_address: mac,
|
||||
interface: None,
|
||||
comment: None,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn assign_available_v6_to_vm(
|
||||
&self,
|
||||
vm: &Vm,
|
||||
v6: &mut AvailableIp,
|
||||
) -> Result<VmIpAssignment> {
|
||||
match v6.mode {
|
||||
// it's a bit awkward, but we need to update the IP AFTER it's been picked
|
||||
// simply because sometimes we don't know the MAC of the NIC yet
|
||||
IpRangeAllocationMode::SlaacEui64 => {
|
||||
let mac = NetworkProvisioner::parse_mac(&vm.mac_address)?;
|
||||
let addr = NetworkProvisioner::calculate_eui64(&mac, &v6.ip)?;
|
||||
v6.ip = IpNetwork::new(addr, v6.ip.prefix())?;
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
let mut assignment = VmIpAssignment {
|
||||
vm_id: vm.id,
|
||||
ip_range_id: v6.range_id,
|
||||
ip: v6.ip.ip().to_string(),
|
||||
..Default::default()
|
||||
};
|
||||
self.save_ip_assignment(&mut assignment).await?;
|
||||
Ok(assignment)
|
||||
}
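// Worked example of the SLAAC/EUI-64 branch above (illustrative values): for a VM
// with MAC 52:54:00:ab:cd:ef, the interface identifier is formed by flipping the
// universal/local bit of the first octet (0x52 -> 0x50) and inserting ff:fe in the
// middle, giving 5054:00ff:feab:cdef. Combined with a /64 prefix such as fd00::/64
// this yields fd00::5054:ff:feab:cdef, which is why the MAC has to be known before
// the final IPv6 address can be stored.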
|
||||
|
||||
async fn allocate_ips(&self, vm_id: u64) -> Result<Vec<VmIpAssignment>> {
|
||||
let mut vm = self.db.get_vm(vm_id).await?;
|
||||
let existing_ips = self.db.list_vm_ip_assignments(vm_id).await?;
|
||||
if !existing_ips.is_empty() {
|
||||
return Ok(existing_ips);
|
||||
}
|
||||
|
||||
// Use random network provisioner
|
||||
let network = NetworkProvisioner::new(self.db.clone());
|
||||
|
||||
let host = self.db.get_host(vm.host_id).await?;
|
||||
let ip = network.pick_ip_for_region(host.region_id).await?;
|
||||
let mut assignments = vec![];
|
||||
match ip.ip4 {
|
||||
Some(v4) => {
|
||||
let mut assignment = VmIpAssignment {
|
||||
vm_id: vm.id,
|
||||
ip_range_id: v4.range_id,
|
||||
ip: v4.ip.ip().to_string(),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
// generate MAC address from IP assignment
|
||||
let mac = self.get_mac_for_assignment(&host, &vm, &assignment).await?;
|
||||
vm.mac_address = mac.mac_address;
|
||||
assignment.arp_ref = mac.id; // store ref if we got one
|
||||
self.db.update_vm(&vm).await?;
|
||||
|
||||
self.save_ip_assignment(&mut assignment).await?;
|
||||
assignments.push(assignment);
|
||||
}
|
||||
// TODO: add expected number of IPs per template
|
||||
None => bail!("Cannot provision VM without an IPv4 address"),
|
||||
}
|
||||
if let Some(mut v6) = ip.ip6 {
|
||||
assignments.push(self.assign_available_v6_to_vm(&vm, &mut v6).await?);
|
||||
}
|
||||
|
||||
Ok(assignments)
|
||||
}
|
||||
|
||||
/// Do any necessary initialization
|
||||
pub async fn init(&self) -> Result<()> {
|
||||
let hosts = self.db.list_hosts().await?;
|
||||
let images = self.db.list_os_image().await?;
|
||||
for host in hosts {
|
||||
let client = get_host_client(&host, &self.provisioner_config)?;
|
||||
for image in &images {
|
||||
if let Err(e) = client.download_os_image(image).await {
|
||||
warn!(
|
||||
"Error downloading image {} on {}: {}",
|
||||
image.url, host.name, e
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get database handle
|
||||
pub fn get_db(&self) -> Arc<dyn LNVpsDb> {
|
||||
self.db.clone()
|
||||
}
|
||||
|
||||
/// Provision a new VM for a user on the database
|
||||
///
|
||||
/// Note:
|
||||
/// 1. Does not create a VM on the host machine
|
||||
/// 2. Does not assign any IP resources
|
||||
pub async fn provision(
|
||||
&self,
|
||||
user_id: u64,
|
||||
template_id: u64,
|
||||
image_id: u64,
|
||||
ssh_key_id: u64,
|
||||
ref_code: Option<String>,
|
||||
) -> Result<Vm> {
|
||||
let user = self.db.get_user(user_id).await?;
|
||||
let template = self.db.get_vm_template(template_id).await?;
|
||||
let image = self.db.get_os_image(image_id).await?;
|
||||
let ssh_key = self.db.get_user_ssh_key(ssh_key_id).await?;
|
||||
|
||||
// TODO: cache capacity somewhere
|
||||
let cap = HostCapacityService::new(self.db.clone());
|
||||
let host = cap
|
||||
.get_host_for_template(template.region_id, &template)
|
||||
.await?;
|
||||
|
||||
let pick_disk = if let Some(hd) = host.disks.first() {
|
||||
hd
|
||||
} else {
|
||||
bail!("No host disk found")
|
||||
};
|
||||
|
||||
let mut new_vm = Vm {
|
||||
id: 0,
|
||||
host_id: host.host.id,
|
||||
user_id: user.id,
|
||||
image_id: image.id,
|
||||
template_id: Some(template.id),
|
||||
custom_template_id: None,
|
||||
ssh_key_id: ssh_key.id,
|
||||
created: Utc::now(),
|
||||
expires: Utc::now(),
|
||||
disk_id: pick_disk.disk.id,
|
||||
mac_address: "ff:ff:ff:ff:ff:ff".to_string(),
|
||||
deleted: false,
|
||||
ref_code,
|
||||
};
|
||||
|
||||
let new_id = self.db.insert_vm(&new_vm).await?;
|
||||
new_vm.id = new_id;
|
||||
Ok(new_vm)
|
||||
}
|
||||
|
||||
/// Provision a new VM for a user on the database
|
||||
///
|
||||
/// Note:
|
||||
/// 1. Does not create a VM on the host machine
|
||||
/// 2. Does not assign any IP resources
|
||||
pub async fn provision_custom(
|
||||
&self,
|
||||
user_id: u64,
|
||||
template: VmCustomTemplate,
|
||||
image_id: u64,
|
||||
ssh_key_id: u64,
|
||||
ref_code: Option<String>,
|
||||
) -> Result<Vm> {
|
||||
let user = self.db.get_user(user_id).await?;
|
||||
let pricing = self.db.get_vm_template(template.pricing_id).await?;
|
||||
let image = self.db.get_os_image(image_id).await?;
|
||||
let ssh_key = self.db.get_user_ssh_key(ssh_key_id).await?;
|
||||
|
||||
// TODO: cache capacity somewhere
|
||||
let cap = HostCapacityService::new(self.db.clone());
|
||||
let host = cap
|
||||
.get_host_for_template(pricing.region_id, &template)
|
||||
.await?;
|
||||
|
||||
let pick_disk = if let Some(hd) = host.disks.first() {
|
||||
hd
|
||||
} else {
|
||||
bail!("No host disk found")
|
||||
};
|
||||
|
||||
// insert custom templates
|
||||
let template_id = self.db.insert_custom_vm_template(&template).await?;
|
||||
|
||||
let mut new_vm = Vm {
|
||||
id: 0,
|
||||
host_id: host.host.id,
|
||||
user_id: user.id,
|
||||
image_id: image.id,
|
||||
template_id: None,
|
||||
custom_template_id: Some(template_id),
|
||||
ssh_key_id: ssh_key.id,
|
||||
created: Utc::now(),
|
||||
expires: Utc::now(),
|
||||
disk_id: pick_disk.disk.id,
|
||||
mac_address: "ff:ff:ff:ff:ff:ff".to_string(),
|
||||
deleted: false,
|
||||
ref_code,
|
||||
};
|
||||
|
||||
let new_id = self.db.insert_vm(&new_vm).await?;
|
||||
new_vm.id = new_id;
|
||||
Ok(new_vm)
|
||||
}
|
||||
|
||||
/// Create a renewal payment
|
||||
pub async fn renew(&self, vm_id: u64, method: PaymentMethod) -> Result<VmPayment> {
|
||||
let pe = PricingEngine::new(self.db.clone(), self.rates.clone(), self.tax_rates.clone());
|
||||
let price = pe.get_vm_cost(vm_id, method).await?;
|
||||
self.price_to_payment(vm_id, method, price).await
|
||||
}
|
||||
|
||||
/// Renew a VM using a specific amount
|
||||
pub async fn renew_amount(&self, vm_id: u64, amount: CurrencyAmount, method: PaymentMethod) -> Result<VmPayment> {
|
||||
let pe = PricingEngine::new(self.db.clone(), self.rates.clone(), self.tax_rates.clone());
|
||||
let price = pe.get_cost_by_amount(vm_id, amount, method).await?;
|
||||
self.price_to_payment(vm_id, method, price).await
|
||||
}
|
||||
|
||||
async fn price_to_payment(
|
||||
&self,
|
||||
vm_id: u64,
|
||||
method: PaymentMethod,
|
||||
price: CostResult,
|
||||
) -> Result<VmPayment> {
|
||||
match price {
|
||||
CostResult::Existing(p) => Ok(p),
|
||||
CostResult::New {
|
||||
amount,
|
||||
currency,
|
||||
time_value,
|
||||
new_expiry,
|
||||
rate,
|
||||
tax,
|
||||
} => {
|
||||
let desc = format!("VM renewal {vm_id} to {new_expiry}");
|
||||
let vm_payment = match method {
|
||||
PaymentMethod::Lightning => {
|
||||
ensure!(
|
||||
currency == Currency::BTC,
|
||||
"Cannot create invoices for non-BTC currency"
|
||||
);
|
||||
const INVOICE_EXPIRE: u64 = 600;
|
||||
let total_amount = amount + tax;
|
||||
info!(
|
||||
"Creating invoice for {vm_id} for {} sats",
|
||||
total_amount / 1000
|
||||
);
|
||||
let invoice = self
|
||||
.node
|
||||
.add_invoice(AddInvoiceRequest {
|
||||
memo: Some(desc),
|
||||
amount: total_amount,
|
||||
expire: Some(INVOICE_EXPIRE as u32),
|
||||
})
|
||||
.await?;
|
||||
VmPayment {
|
||||
id: hex::decode(invoice.payment_hash)?,
|
||||
vm_id,
|
||||
created: Utc::now(),
|
||||
expires: Utc::now().add(Duration::from_secs(INVOICE_EXPIRE)),
|
||||
amount,
|
||||
tax,
|
||||
currency: currency.to_string(),
|
||||
payment_method: method,
|
||||
time_value,
|
||||
is_paid: false,
|
||||
rate,
|
||||
external_data: invoice.pr,
|
||||
external_id: invoice.external_id,
|
||||
}
|
||||
}
|
||||
PaymentMethod::Revolut => {
|
||||
let rev = if let Some(r) = &self.revolut {
|
||||
r
|
||||
} else {
|
||||
bail!("Revolut not configured")
|
||||
};
|
||||
ensure!(
|
||||
currency != Currency::BTC,
|
||||
"Cannot create revolut orders for BTC currency"
|
||||
);
|
||||
let order = rev
|
||||
.create_order(&desc, CurrencyAmount::from_u64(currency, amount + tax))
|
||||
.await?;
|
||||
let new_id: [u8; 32] = rand::random();
|
||||
VmPayment {
|
||||
id: new_id.to_vec(),
|
||||
vm_id,
|
||||
created: Utc::now(),
|
||||
expires: Utc::now().add(Duration::from_secs(3600)),
|
||||
amount,
|
||||
tax,
|
||||
currency: currency.to_string(),
|
||||
payment_method: method,
|
||||
time_value,
|
||||
is_paid: false,
|
||||
rate,
|
||||
external_data: order.raw_data,
|
||||
external_id: Some(order.external_id),
|
||||
}
|
||||
}
|
||||
PaymentMethod::Paypal => todo!(),
|
||||
};
|
||||
|
||||
self.db.insert_vm_payment(&vm_payment).await?;
|
||||
|
||||
Ok(vm_payment)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a vm on the host as configured by the template
|
||||
pub async fn spawn_vm(&self, vm_id: u64) -> Result<()> {
|
||||
if self.read_only {
|
||||
bail!("Cant spawn VM's in read-only mode")
|
||||
}
|
||||
// setup network by allocating some IP space
|
||||
self.allocate_ips(vm_id).await?;
|
||||
|
||||
// load full info
|
||||
let info = FullVmInfo::load(vm_id, self.db.clone()).await?;
|
||||
|
||||
// load host client
|
||||
let host = self.db.get_host(info.vm.host_id).await?;
|
||||
let client = get_host_client(&host, &self.provisioner_config)?;
|
||||
client.create_vm(&info).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Delete a VM and its associated resources
|
||||
pub async fn delete_vm(&self, vm_id: u64) -> Result<()> {
|
||||
let vm = self.db.get_vm(vm_id).await?;
|
||||
let host = self.db.get_host(vm.host_id).await?;
|
||||
|
||||
let client = get_host_client(&host, &self.provisioner_config)?;
|
||||
if let Err(e) = client.delete_vm(&vm).await {
|
||||
warn!("Failed to delete VM: {}", e);
|
||||
}
|
||||
|
||||
self.delete_ip_assignments(vm_id).await?;
|
||||
self.db.delete_vm(vm_id).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Stop a running VM
|
||||
pub async fn stop_vm(&self, vm_id: u64) -> Result<()> {
|
||||
let vm = self.db.get_vm(vm_id).await?;
|
||||
let host = self.db.get_host(vm.host_id).await?;
|
||||
|
||||
let client = get_host_client(&host, &self.provisioner_config)?;
|
||||
client.stop_vm(&vm).await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::exchange::{DefaultRateCache, Ticker};
|
||||
use crate::mocks::{MockDb, MockDnsServer, MockExchangeRate, MockNode, MockRouter};
|
||||
use crate::settings::mock_settings;
|
||||
use lnvps_db::{DiskInterface, DiskType, User, UserSshKey, VmTemplate};
|
||||
use std::net::IpAddr;
|
||||
use std::str::FromStr;
|
||||
|
||||
const ROUTER_BRIDGE: &str = "bridge1";
|
||||
|
||||
pub fn settings() -> Settings {
|
||||
let mut settings = mock_settings();
|
||||
settings
|
||||
}
|
||||
|
||||
async fn add_user(db: &Arc<MockDb>) -> Result<(User, UserSshKey)> {
|
||||
let pubkey: [u8; 32] = rand::random();
|
||||
|
||||
let user_id = db.upsert_user(&pubkey).await?;
|
||||
let mut new_key = UserSshKey {
|
||||
id: 0,
|
||||
name: "test-key".to_string(),
|
||||
user_id,
|
||||
created: Default::default(),
|
||||
key_data: "ssh-rsa AAA==".to_string(),
|
||||
};
|
||||
let ssh_key = db.insert_user_ssh_key(&new_key).await?;
|
||||
new_key.id = ssh_key;
|
||||
Ok((db.get_user(user_id).await?, new_key))
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn basic() -> Result<()> {
|
||||
let settings = settings();
|
||||
let db = Arc::new(MockDb::default());
|
||||
let node = Arc::new(MockNode::default());
|
||||
let rates = Arc::new(MockExchangeRate::new());
|
||||
const MOCK_RATE: f32 = 69_420.0;
|
||||
rates.set_rate(Ticker::btc_rate("EUR")?, MOCK_RATE).await;
|
||||
|
||||
// add static arp policy
|
||||
{
|
||||
let mut r = db.router.lock().await;
|
||||
r.insert(
|
||||
1,
|
||||
lnvps_db::Router {
|
||||
id: 1,
|
||||
name: "mock-router".to_string(),
|
||||
enabled: true,
|
||||
kind: RouterKind::Mikrotik,
|
||||
url: "https://localhost".to_string(),
|
||||
token: "username:password".to_string(),
|
||||
},
|
||||
);
|
||||
let mut p = db.access_policy.lock().await;
|
||||
p.insert(
|
||||
1,
|
||||
AccessPolicy {
|
||||
id: 1,
|
||||
name: "static-arp".to_string(),
|
||||
kind: NetworkAccessPolicy::StaticArp,
|
||||
router_id: Some(1),
|
||||
interface: Some(ROUTER_BRIDGE.to_string()),
|
||||
},
|
||||
);
|
||||
let mut i = db.ip_range.lock().await;
|
||||
let r = i.get_mut(&1).unwrap();
|
||||
r.access_policy_id = Some(1);
|
||||
r.reverse_zone_id = Some("mock-rev-zone-id".to_string());
|
||||
let r = i.get_mut(&2).unwrap();
|
||||
r.reverse_zone_id = Some("mock-v6-rev-zone-id".to_string());
|
||||
}
|
||||
|
||||
let dns = MockDnsServer::new();
|
||||
let provisioner = LNVpsProvisioner::new(settings, db.clone(), node.clone(), rates.clone());
|
||||
|
||||
let (user, ssh_key) = add_user(&db).await?;
|
||||
let vm = provisioner
|
||||
.provision(user.id, 1, 1, ssh_key.id, Some("mock-ref".to_string()))
|
||||
.await?;
|
||||
println!("{:?}", vm);
|
||||
|
||||
// renew vm
|
||||
let payment = provisioner.renew(vm.id, PaymentMethod::Lightning).await?;
|
||||
assert_eq!(vm.id, payment.vm_id);
|
||||
assert_eq!(payment.tax, (payment.amount as f64 * 0.01).floor() as u64);
|
||||
|
||||
// check invoice amount matches amount+tax
|
||||
let inv = node.invoices.lock().await;
|
||||
if let Some(i) = inv.get(&hex::encode(payment.id)) {
|
||||
assert_eq!(i.amount, payment.amount + payment.tax);
|
||||
} else {
|
||||
bail!("Invoice doesnt exist");
|
||||
}
|
||||
|
||||
// spawn vm
|
||||
provisioner.spawn_vm(vm.id).await?;
|
||||
|
||||
let vm = db.get_vm(vm.id).await?;
|
||||
// check resources
|
||||
let router = MockRouter::new();
|
||||
let arp = router.list_arp_entry().await?;
|
||||
assert_eq!(1, arp.len());
|
||||
let arp = arp.first().unwrap();
|
||||
assert_eq!(&vm.mac_address, &arp.mac_address);
|
||||
assert_eq!(vm.ref_code, Some("mock-ref".to_string()));
|
||||
assert_eq!(ROUTER_BRIDGE, arp.interface.as_ref().unwrap());
|
||||
println!("{:?}", arp);
|
||||
|
||||
let ips = db.list_vm_ip_assignments(vm.id).await?;
|
||||
assert_eq!(2, ips.len());
|
||||
|
||||
// lookup v4 ip
|
||||
let v4 = ips.iter().find(|r| r.ip_range_id == 1).unwrap();
|
||||
println!("{:?}", v4);
|
||||
assert_eq!(v4.ip, arp.address);
|
||||
assert_eq!(v4.ip_range_id, 1);
|
||||
assert_eq!(v4.vm_id, vm.id);
|
||||
assert!(v4.dns_forward.is_some());
|
||||
assert!(v4.dns_reverse.is_some());
|
||||
assert!(v4.dns_reverse_ref.is_some());
|
||||
assert!(v4.dns_forward_ref.is_some());
|
||||
assert_eq!(v4.dns_reverse, v4.dns_forward);
|
||||
|
||||
// assert the stored IP is a plain address, not CIDR notation
|
||||
assert!(IpAddr::from_str(&v4.ip).is_ok());
|
||||
assert!(!v4.ip.ends_with("/8"));
|
||||
assert!(!v4.ip.ends_with("/24"));
|
||||
|
||||
// lookup v6 ip
|
||||
let v6 = ips.iter().find(|r| r.ip_range_id == 2).unwrap();
|
||||
println!("{:?}", v6);
|
||||
assert_eq!(v6.ip_range_id, 2);
|
||||
assert_eq!(v6.vm_id, vm.id);
|
||||
assert!(v6.dns_forward.is_some());
|
||||
assert!(v6.dns_reverse.is_some());
|
||||
assert!(v6.dns_reverse_ref.is_some());
|
||||
assert!(v6.dns_forward_ref.is_some());
|
||||
assert_eq!(v6.dns_reverse, v6.dns_forward);
|
||||
|
||||
// test zones have dns entries
|
||||
{
|
||||
let zones = dns.zones.lock().await;
|
||||
assert_eq!(zones.get("mock-rev-zone-id").unwrap().len(), 1);
|
||||
assert_eq!(zones.get("mock-v6-rev-zone-id").unwrap().len(), 1);
|
||||
assert_eq!(zones.get("mock-forward-zone-id").unwrap().len(), 2);
|
||||
|
||||
let v6 = zones
|
||||
.get("mock-v6-rev-zone-id")
|
||||
.unwrap()
|
||||
.iter()
|
||||
.next()
|
||||
.unwrap();
|
||||
assert_eq!(v6.1.kind, "PTR");
|
||||
assert!(v6.1.name.ends_with("0.0.d.f.ip6.arpa"));
|
||||
}
|
||||
|
||||
// now delete the VM as if it had expired
|
||||
provisioner.delete_vm(vm.id).await?;
|
||||
|
||||
// test arp/dns is removed
|
||||
let arp = router.list_arp_entry().await?;
|
||||
assert!(arp.is_empty());
|
||||
|
||||
// test dns entries are deleted
|
||||
{
|
||||
let zones = dns.zones.lock().await;
|
||||
assert_eq!(zones.get("mock-rev-zone-id").unwrap().len(), 0);
|
||||
assert_eq!(zones.get("mock-forward-zone-id").unwrap().len(), 0);
|
||||
}
|
||||
|
||||
// ensure IPs are deleted
|
||||
let ips = db.list_vm_ip_assignments(vm.id).await?;
|
||||
for ip in ips {
|
||||
println!("{:?}", ip);
|
||||
assert!(ip.arp_ref.is_none());
|
||||
assert!(ip.dns_forward.is_none());
|
||||
assert!(ip.dns_reverse.is_none());
|
||||
assert!(ip.dns_reverse_ref.is_none());
|
||||
assert!(ip.dns_forward_ref.is_none());
|
||||
assert!(ip.deleted);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_no_capacity() -> Result<()> {
|
||||
let settings = settings();
|
||||
let db = Arc::new(MockDb::default());
|
||||
let node = Arc::new(MockNode::default());
|
||||
let rates = Arc::new(DefaultRateCache::default());
|
||||
let prov = LNVpsProvisioner::new(settings.clone(), db.clone(), node.clone(), rates.clone());
|
||||
|
||||
let large_template = VmTemplate {
|
||||
id: 0,
|
||||
name: "mock-large-template".to_string(),
|
||||
enabled: true,
|
||||
created: Default::default(),
|
||||
expires: None,
|
||||
cpu: 64,
|
||||
memory: 512 * crate::GB,
|
||||
disk_size: 20 * crate::TB,
|
||||
disk_type: DiskType::SSD,
|
||||
disk_interface: DiskInterface::PCIe,
|
||||
cost_plan_id: 1,
|
||||
region_id: 1,
|
||||
};
|
||||
let id = db.insert_vm_template(&large_template).await?;
|
||||
|
||||
let (user, ssh_key) = add_user(&db).await?;
|
||||
|
||||
let prov = prov.provision(user.id, id, 1, ssh_key.id, None).await;
|
||||
assert!(prov.is_err());
|
||||
if let Err(e) = prov {
|
||||
println!("{}", e);
|
||||
assert!(e.to_string().to_lowercase().contains("no available host"))
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
231 lnvps_api/src/provisioner/network.rs (new file)
@ -0,0 +1,231 @@
|
||||
use anyhow::{bail, Context, Result};
|
||||
use clap::builder::TypedValueParser;
|
||||
use ipnetwork::{IpNetwork, Ipv6Network};
|
||||
use lnvps_db::{IpRange, IpRangeAllocationMode, LNVpsDb};
|
||||
use log::warn;
|
||||
use rand::prelude::IteratorRandom;
|
||||
use rocket::form::validate::Contains;
|
||||
use rocket::http::ext::IntoCollection;
|
||||
use std::collections::HashSet;
|
||||
use std::net::{IpAddr, Ipv6Addr};
|
||||
use std::sync::Arc;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct AvailableIps {
|
||||
pub ip4: Option<AvailableIp>,
|
||||
pub ip6: Option<AvailableIp>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct AvailableIp {
|
||||
pub ip: IpNetwork,
|
||||
pub gateway: IpNetwork,
|
||||
pub range_id: u64,
|
||||
pub region_id: u64,
|
||||
pub mode: IpRangeAllocationMode,
|
||||
}
|
||||
|
||||
/// Handles picking available IPs
|
||||
#[derive(Clone)]
|
||||
pub struct NetworkProvisioner {
|
||||
db: Arc<dyn LNVpsDb>,
|
||||
}
|
||||
|
||||
impl NetworkProvisioner {
|
||||
pub fn new(db: Arc<dyn LNVpsDb>) -> Self {
|
||||
Self { db }
|
||||
}
|
||||
|
||||
/// Pick an IP from one of the available ip ranges
|
||||
/// This method MUST return a free IP which can be used
|
||||
pub async fn pick_ip_for_region(&self, region_id: u64) -> Result<AvailableIps> {
|
||||
let ip_ranges = self.db.list_ip_range_in_region(region_id).await?;
|
||||
if ip_ranges.is_empty() {
|
||||
bail!("No ip range found in this region");
|
||||
}
|
||||
|
||||
let mut ret = AvailableIps {
|
||||
ip4: None,
|
||||
ip6: None,
|
||||
};
|
||||
for range in ip_ranges {
|
||||
let range_cidr: IpNetwork = range.cidr.parse()?;
|
||||
if ret.ip4.is_none() && range_cidr.is_ipv4() {
|
||||
ret.ip4 = match self.pick_ip_from_range(&range).await {
|
||||
Ok(i) => Some(i),
|
||||
Err(e) => {
|
||||
warn!("Failed to pick ip range: {} {}", range.cidr, e);
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
if ret.ip6.is_none() && range_cidr.is_ipv6() {
|
||||
ret.ip6 = match self.pick_ip_from_range(&range).await {
|
||||
Ok(i) => Some(i),
|
||||
Err(e) => {
|
||||
warn!("Failed to pick ip range: {} {}", range.cidr, e);
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if ret.ip4.is_none() && ret.ip6.is_none() {
|
||||
bail!("No IPs available in this region");
|
||||
} else {
|
||||
Ok(ret)
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn pick_ip_from_range(&self, range: &IpRange) -> Result<AvailableIp> {
|
||||
let range_cidr: IpNetwork = range.cidr.parse()?;
|
||||
let ips = self.db.list_vm_ip_assignments_in_range(range.id).await?;
|
||||
let mut ips: HashSet<IpAddr> = ips.iter().map_while(|i| i.ip.parse().ok()).collect();
|
||||
|
||||
let gateway: IpNetwork = range.gateway.parse()?;
|
||||
|
||||
// mark some IPs as always used
|
||||
// Namely:
|
||||
// .0 & .255 of /24 (first and last)
|
||||
// gateway ip of the range
|
||||
if !range.use_full_range && range_cidr.is_ipv4() {
|
||||
ips.insert(range_cidr.iter().next().unwrap());
|
||||
ips.insert(range_cidr.iter().last().unwrap());
|
||||
}
|
||||
ips.insert(gateway.ip());
|
||||
|
||||
// pick an IP from the range
|
||||
let ip_pick = {
|
||||
match &range.allocation_mode {
|
||||
IpRangeAllocationMode::Sequential => range_cidr
|
||||
.iter()
|
||||
.find(|i| !ips.contains(i))
|
||||
.and_then(|i| IpNetwork::new(i, range_cidr.prefix()).ok()),
|
||||
IpRangeAllocationMode::Random => {
|
||||
let mut rng = rand::rng();
|
||||
loop {
|
||||
if let Some(i) = range_cidr.iter().choose(&mut rng) {
|
||||
if !ips.contains(&i) {
|
||||
break IpNetwork::new(i, range_cidr.prefix()).ok();
|
||||
}
|
||||
} else {
|
||||
break None;
|
||||
}
|
||||
}
|
||||
}
|
||||
IpRangeAllocationMode::SlaacEui64 => {
|
||||
if range_cidr.network().is_ipv4() {
|
||||
bail!("Cannot create EUI-64 from IPv4 address")
|
||||
} else {
|
||||
// EUI-64 addresses are derived from the VM MAC, so the range is effectively always free
|
||||
Some(range_cidr)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
.context("No ips available in range")?;
|
||||
|
||||
Ok(AvailableIp {
|
||||
range_id: range.id,
|
||||
gateway,
|
||||
ip: ip_pick,
|
||||
region_id: range.region_id,
|
||||
mode: range.allocation_mode.clone(),
|
||||
})
|
||||
}
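// Illustration of the reservation rule above for 10.0.0.0/24 with gateway 10.0.0.1 and
// use_full_range = false; a hypothetical sketch that assumes the forked ipnetwork crate
// used here, where IpNetwork::iter() yields every address in the range in order.
fn reserved_ip_example() {
    use ipnetwork::IpNetwork;
    use std::collections::HashSet;
    use std::net::IpAddr;

    let cidr: IpNetwork = "10.0.0.0/24".parse().unwrap();
    let gateway: IpNetwork = "10.0.0.1/24".parse().unwrap();
    let mut used: HashSet<IpAddr> = HashSet::new();
    used.insert(cidr.iter().next().unwrap()); // 10.0.0.0 (network address)
    used.insert(cidr.iter().last().unwrap()); // 10.0.0.255 (broadcast)
    used.insert(gateway.ip());                // 10.0.0.1 (gateway)
    // sequential allocation therefore starts at 10.0.0.2
    let first_free = cidr.iter().find(|i| !used.contains(i)).unwrap();
    assert_eq!(first_free.to_string(), "10.0.0.2");
}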
|
||||
|
||||
    pub fn calculate_eui64(mac: &[u8; 6], prefix: &IpNetwork) -> Result<IpAddr> {
        if prefix.is_ipv4() {
            bail!("Prefix must be IPv6".to_string())
        }

        let mut eui64 = [0u8; 8];
        eui64[0] = mac[0] ^ 0x02;
        eui64[1] = mac[1];
        eui64[2] = mac[2];
        eui64[3] = 0xFF;
        eui64[4] = 0xFE;
        eui64[5] = mac[3];
        eui64[6] = mac[4];
        eui64[7] = mac[5];

        // Combine prefix with EUI-64 interface identifier
        let mut prefix_bytes = match prefix.network() {
            IpAddr::V4(_) => bail!("Not supported"),
            IpAddr::V6(v6) => v6.octets(),
        };
        // copy EUI-64 into prefix
        prefix_bytes[8..16].copy_from_slice(&eui64);

        let ipv6_addr = Ipv6Addr::from(prefix_bytes);
        Ok(IpAddr::V6(ipv6_addr))
    }

    pub fn parse_mac(mac: &str) -> Result<[u8; 6]> {
        Ok(hex::decode(mac.replace(":", ""))?.as_slice().try_into()?)
    }

    pub fn ipv6_to_ptr(addr: &Ipv6Addr) -> Result<String> {
        let octets = addr.octets();
        let mut nibbles = Vec::new();
        for byte in octets.iter().rev() {
            let high_nibble = (byte >> 4) & 0x0Fu8;
            let low_nibble = byte & 0x0F;
            nibbles.push(format!("{:x}", low_nibble));
            nibbles.push(format!("{:x}", high_nibble));
        }
        Ok(format!("{}.ip6.arpa", nibbles.join(".")))
    }
}
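// A minimal usage sketch of the three helpers above; the MAC and prefix values are
// made up, and it assumes the helpers stay associated functions on NetworkProvisioner.
fn eui64_example() -> Result<()> {
    let mac = NetworkProvisioner::parse_mac("bc:24:11:00:00:01")?;
    let prefix: IpNetwork = "fd00::/64".parse()?;
    // U/L bit flipped and ff:fe inserted -> fd00::be24:11ff:fe00:1
    let addr = NetworkProvisioner::calculate_eui64(&mac, &prefix)?;
    if let IpAddr::V6(v6) = addr {
        // reverse-nibble PTR name, ending in 0.0.d.f.ip6.arpa for an fd00:: prefix
        let ptr = NetworkProvisioner::ipv6_to_ptr(&v6)?;
        println!("{} -> {}", addr, ptr);
    }
    Ok(())
}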
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::mocks::*;
|
||||
|
||||
use lnvps_db::VmIpAssignment;
|
||||
use std::str::FromStr;
|
||||
|
||||
#[tokio::test]
|
||||
async fn pick_seq_ip_for_region_test() {
|
||||
let db: Arc<dyn LNVpsDb> = Arc::new(MockDb::default());
|
||||
let mgr = NetworkProvisioner::new(db.clone());
|
||||
|
||||
let mac: [u8; 6] = [0xff, 0xff, 0xff, 0xfa, 0xfb, 0xfc];
|
||||
let gateway = IpNetwork::from_str("10.0.0.1/8").unwrap();
|
||||
let first = IpAddr::from_str("10.0.0.2").unwrap();
|
||||
let second = IpAddr::from_str("10.0.0.3").unwrap();
|
||||
let ip = mgr.pick_ip_for_region(1).await.expect("No ip found in db");
|
||||
let v4 = ip.ip4.unwrap();
|
||||
assert_eq!(v4.region_id, 1);
|
||||
assert_eq!(first, v4.ip.ip());
|
||||
assert_eq!(gateway, v4.gateway);
|
||||
|
||||
let ip = mgr.pick_ip_for_region(1).await.expect("No ip found in db");
|
||||
let v4 = ip.ip4.unwrap();
|
||||
assert_eq!(1, v4.region_id);
|
||||
assert_eq!(first, v4.ip.ip());
|
||||
db.insert_vm_ip_assignment(&VmIpAssignment {
|
||||
id: 0,
|
||||
vm_id: 0,
|
||||
ip_range_id: v4.range_id,
|
||||
ip: v4.ip.ip().to_string(),
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
.expect("Could not insert vm ip");
|
||||
let ip = mgr.pick_ip_for_region(1).await.expect("No ip found in db");
|
||||
let v4 = ip.ip4.unwrap();
|
||||
assert_eq!(second, v4.ip.ip());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn pick_rng_ip_for_region_test() {
|
||||
let db: Arc<dyn LNVpsDb> = Arc::new(MockDb::default());
|
||||
let mgr = NetworkProvisioner::new(db);
|
||||
|
||||
let mac: [u8; 6] = [0xff, 0xff, 0xff, 0xfa, 0xfb, 0xfc];
|
||||
let ip = mgr.pick_ip_for_region(1).await.expect("No ip found in db");
|
||||
let v4 = ip.ip4.unwrap();
|
||||
assert_eq!(1, v4.region_id);
|
||||
}
|
||||
}
|
@ -1,9 +1,13 @@
|
||||
use crate::exchange::{Currency, ExchangeRateService, Ticker};
|
||||
use anyhow::{bail, Context, Result};
|
||||
use crate::exchange::{Currency, CurrencyAmount, ExchangeRateService, Ticker, TickerRate};
|
||||
use anyhow::{bail, ensure, Result};
|
||||
use chrono::{DateTime, Days, Months, TimeDelta, Utc};
|
||||
use ipnetwork::IpNetwork;
|
||||
use lnvps_db::{LNVpsDb, Vm, VmCostPlan, VmCostPlanIntervalType, VmCustomTemplate, VmPayment};
|
||||
use isocountry::CountryCode;
|
||||
use lnvps_db::{
|
||||
LNVpsDb, PaymentMethod, Vm, VmCostPlan, VmCostPlanIntervalType, VmCustomTemplate, VmPayment,
|
||||
};
|
||||
use log::info;
|
||||
use std::collections::HashMap;
|
||||
use std::ops::Add;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
@ -14,36 +18,82 @@ use std::sync::Arc;
|
||||
pub struct PricingEngine {
|
||||
db: Arc<dyn LNVpsDb>,
|
||||
rates: Arc<dyn ExchangeRateService>,
|
||||
tax_rates: HashMap<CountryCode, f32>,
|
||||
}
|
||||
|
||||
impl PricingEngine {
|
||||
/// SATS per BTC
|
||||
const BTC_SATS: f64 = 100_000_000.0;
|
||||
const KB: u64 = 1024;
|
||||
const MB: u64 = Self::KB * 1024;
|
||||
const GB: u64 = Self::MB * 1024;
|
||||
pub fn new(
|
||||
db: Arc<dyn LNVpsDb>,
|
||||
rates: Arc<dyn ExchangeRateService>,
|
||||
tax_rates: HashMap<CountryCode, f32>,
|
||||
) -> Self {
|
||||
Self {
|
||||
db,
|
||||
rates,
|
||||
tax_rates,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new(db: Arc<dyn LNVpsDb>, rates: Arc<dyn ExchangeRateService>) -> Self {
|
||||
Self { db, rates }
|
||||
/// Get amount of time a certain currency amount will extend a vm in seconds
|
||||
pub async fn get_cost_by_amount(
|
||||
&self,
|
||||
vm_id: u64,
|
||||
input: CurrencyAmount,
|
||||
method: PaymentMethod,
|
||||
) -> Result<CostResult> {
|
||||
let vm = self.db.get_vm(vm_id).await?;
|
||||
|
||||
let cost = if vm.template_id.is_some() {
|
||||
self.get_template_vm_cost(&vm, method).await?
|
||||
} else {
|
||||
self.get_custom_vm_cost(&vm, method).await?
|
||||
};
|
||||
|
||||
match cost {
|
||||
CostResult::Existing(_) => bail!("Invalid response"),
|
||||
CostResult::New {
|
||||
currency,
|
||||
amount,
|
||||
rate,
|
||||
time_value,
|
||||
..
|
||||
} => {
|
||||
ensure!(currency == input.currency(), "Invalid currency");
|
||||
|
||||
// scale cost
|
||||
let scale = input.value() as f64 / amount as f64;
|
||||
let new_time = (time_value as f64 * scale).floor() as u64;
|
||||
ensure!(new_time > 0, "Extend time is less than 1 second");
|
||||
|
||||
Ok(CostResult::New {
|
||||
amount: input.value(),
|
||||
currency,
|
||||
time_value: new_time,
|
||||
new_expiry: vm.expires.add(TimeDelta::seconds(new_time as i64)),
|
||||
rate,
|
||||
tax: self.get_tax_for_user(vm.user_id, input.value()).await?,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
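// Worked example of the proration above, with illustrative numbers only: a full
// renewal of 20_000 msats buys 30 days, so an input of 1_000 msats buys 5% of that.
fn proration_example() {
    let (amount, time_value) = (20_000u64, 2_592_000u64); // 2_592_000 s = 30 days
    let input_value = 1_000u64;
    let scale = input_value as f64 / amount as f64; // 0.05
    let new_time = (time_value as f64 * scale).floor() as u64;
    assert_eq!(new_time, 129_600); // 1.5 days
}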
|
||||
|
||||
/// Get VM cost (for renewal)
|
||||
pub async fn get_vm_cost(&self, vm_id: u64) -> Result<CostResult> {
|
||||
pub async fn get_vm_cost(&self, vm_id: u64, method: PaymentMethod) -> Result<CostResult> {
|
||||
let vm = self.db.get_vm(vm_id).await?;
|
||||
|
||||
// Reuse existing payment until expired
|
||||
let payments = self.db.list_vm_payment(vm.id).await?;
|
||||
if let Some(px) = payments
|
||||
.into_iter()
|
||||
.find(|p| p.expires > Utc::now() && !p.is_paid)
|
||||
.find(|p| p.expires > Utc::now() && !p.is_paid && p.payment_method == method)
|
||||
{
|
||||
return Ok(CostResult::Existing(px));
|
||||
}
|
||||
|
||||
if vm.template_id.is_some() {
|
||||
Ok(self.get_template_vm_cost(&vm).await?)
|
||||
Ok(self.get_template_vm_cost(&vm, method).await?)
|
||||
} else {
|
||||
Ok(self.get_custom_vm_cost(&vm).await?)
|
||||
Ok(self.get_custom_vm_cost(&vm, method).await?)
|
||||
}
|
||||
}
|
||||
|
||||
@ -80,9 +130,9 @@ impl PricingEngine {
|
||||
} else {
|
||||
bail!("No disk price found")
|
||||
};
|
||||
let disk_cost = (template.disk_size / Self::GB) as f32 * disk_pricing.cost;
|
||||
let disk_cost = (template.disk_size / crate::GB) as f32 * disk_pricing.cost;
|
||||
let cpu_cost = pricing.cpu_cost * template.cpu as f32;
|
||||
let memory_cost = pricing.memory_cost * (template.memory / Self::GB) as f32;
|
||||
let memory_cost = pricing.memory_cost * (template.memory / crate::GB) as f32;
|
||||
let ip4_cost = pricing.ip4_cost * v4s as f32;
|
||||
let ip6_cost = pricing.ip6_cost * v6s as f32;
|
||||
|
||||
@ -101,7 +151,7 @@ impl PricingEngine {
|
||||
})
|
||||
}
|
||||
|
||||
async fn get_custom_vm_cost(&self, vm: &Vm) -> Result<CostResult> {
|
||||
async fn get_custom_vm_cost(&self, vm: &Vm, method: PaymentMethod) -> Result<CostResult> {
|
||||
let template_id = if let Some(i) = vm.custom_template_id {
|
||||
i
|
||||
} else {
|
||||
@ -114,29 +164,52 @@ impl PricingEngine {
|
||||
|
||||
// custom templates are always 1-month intervals
|
||||
let time_value = (vm.expires.add(Months::new(1)) - vm.expires).num_seconds() as u64;
|
||||
let (cost_msats, rate) = self.get_msats_amount(price.currency, price.total()).await?;
|
||||
let (currency, amount, rate) = self
|
||||
.get_amount_and_rate(
|
||||
CurrencyAmount::from_f32(price.currency, price.total()),
|
||||
method,
|
||||
)
|
||||
.await?;
|
||||
Ok(CostResult::New {
|
||||
msats: cost_msats,
|
||||
amount,
|
||||
tax: self.get_tax_for_user(vm.user_id, amount).await?,
|
||||
currency,
|
||||
rate,
|
||||
time_value,
|
||||
new_expiry: vm.expires.add(TimeDelta::seconds(time_value as i64)),
|
||||
})
|
||||
}
|
||||
|
||||
async fn get_msats_amount(&self, currency: Currency, amount: f32) -> Result<(u64, f32)> {
|
||||
let ticker = Ticker(Currency::BTC, currency);
|
||||
let rate = if let Some(r) = self.rates.get_rate(ticker).await {
|
||||
r
|
||||
} else {
|
||||
bail!("No exchange rate found")
|
||||
};
|
||||
|
||||
let cost_btc = amount / rate;
|
||||
let cost_msats = (cost_btc as f64 * Self::BTC_SATS) as u64 * 1000;
|
||||
Ok((cost_msats, rate))
|
||||
async fn get_tax_for_user(&self, user_id: u64, amount: u64) -> Result<u64> {
|
||||
let user = self.db.get_user(user_id).await?;
|
||||
if let Some(cc) = user
|
||||
.country_code
|
||||
.and_then(|c| CountryCode::for_alpha3(&c).ok())
|
||||
{
|
||||
if let Some(c) = self.tax_rates.get(&cc) {
|
||||
return Ok((amount as f64 * (*c as f64 / 100f64)).floor() as u64);
|
||||
}
|
||||
}
|
||||
Ok(0)
|
||||
}
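// Illustration of the tax rule above with made-up numbers: a user whose country code
// resolves to IRL, with tax_rates = { IRL: 23.0 }, pays 23% on top of the amount.
fn tax_example() {
    let (amount, rate_percent) = (10_000u64, 23.0f64);
    let tax = (amount as f64 * (rate_percent / 100.0)).floor() as u64;
    assert_eq!(tax, 2_300);
}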
|
||||
|
||||
fn next_template_expire(vm: &Vm, cost_plan: &VmCostPlan) -> u64 {
|
||||
async fn get_ticker(&self, currency: Currency) -> Result<TickerRate> {
|
||||
let ticker = Ticker(Currency::BTC, currency);
|
||||
if let Some(r) = self.rates.get_rate(ticker).await {
|
||||
Ok(TickerRate(ticker, r))
|
||||
} else {
|
||||
bail!("No exchange rate found")
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_msats_amount(&self, amount: CurrencyAmount) -> Result<(u64, f32)> {
|
||||
let rate = self.get_ticker(amount.currency()).await?;
|
||||
let cost_btc = amount.value_f32() / rate.1;
|
||||
let cost_msats = (cost_btc as f64 * crate::BTC_SATS) as u64 * 1000;
|
||||
Ok((cost_msats, rate.1))
|
||||
}
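// Worked example of the fiat-to-msats conversion above (the rate is illustrative):
// 1.32 EUR at 100_000 EUR/BTC is 0.0000132 BTC, i.e. 1_320 sats or 1_320_000 msats.
fn msats_example() {
    let (amount_eur, rate) = (1.32f32, 100_000f32);
    let cost_btc = amount_eur / rate;
    let cost_msats = (cost_btc as f64 * 100_000_000.0) as u64 * 1000;
    assert_eq!(cost_msats, 1_320_000);
}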
|
||||
|
||||
pub fn next_template_expire(vm: &Vm, cost_plan: &VmCostPlan) -> u64 {
|
||||
let next_expire = match cost_plan.interval_type {
|
||||
VmCostPlanIntervalType::Day => vm.expires.add(Days::new(cost_plan.interval_amount)),
|
||||
VmCostPlanIntervalType::Month => vm
|
||||
@ -150,7 +223,7 @@ impl PricingEngine {
|
||||
(next_expire - vm.expires).num_seconds() as u64
|
||||
}
|
||||
|
||||
async fn get_template_vm_cost(&self, vm: &Vm) -> Result<CostResult> {
|
||||
async fn get_template_vm_cost(&self, vm: &Vm, method: PaymentMethod) -> Result<CostResult> {
|
||||
let template_id = if let Some(i) = vm.template_id {
|
||||
i
|
||||
} else {
|
||||
@ -159,20 +232,37 @@ impl PricingEngine {
|
||||
let template = self.db.get_vm_template(template_id).await?;
|
||||
let cost_plan = self.db.get_cost_plan(template.cost_plan_id).await?;
|
||||
|
||||
let (cost_msats, rate) = self
|
||||
.get_msats_amount(
|
||||
cost_plan.currency.parse().expect("Invalid currency"),
|
||||
cost_plan.amount,
|
||||
)
|
||||
let currency = cost_plan.currency.parse().expect("Invalid currency");
|
||||
let (currency, amount, rate) = self
|
||||
.get_amount_and_rate(CurrencyAmount::from_f32(currency, cost_plan.amount), method)
|
||||
.await?;
|
||||
let time_value = Self::next_template_expire(&vm, &cost_plan);
|
||||
let time_value = Self::next_template_expire(vm, &cost_plan);
|
||||
Ok(CostResult::New {
|
||||
msats: cost_msats,
|
||||
amount,
|
||||
tax: self.get_tax_for_user(vm.user_id, amount).await?,
|
||||
currency,
|
||||
rate,
|
||||
time_value,
|
||||
new_expiry: vm.expires.add(TimeDelta::seconds(time_value as i64)),
|
||||
})
|
||||
}
|
||||
|
||||
async fn get_amount_and_rate(
|
||||
&self,
|
||||
list_price: CurrencyAmount,
|
||||
method: PaymentMethod,
|
||||
) -> Result<(Currency, u64, f32)> {
|
||||
Ok(match (list_price.currency(), method) {
|
||||
(c, PaymentMethod::Lightning) if c != Currency::BTC => {
|
||||
let new_price = self.get_msats_amount(list_price).await?;
|
||||
(Currency::BTC, new_price.0, new_price.1)
|
||||
}
|
||||
(cur, PaymentMethod::Revolut) if cur != Currency::BTC => {
|
||||
(cur, list_price.value(), 0.01)
|
||||
}
|
||||
(c, m) => bail!("Cannot create payment for method {} and currency {}", m, c),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
@ -181,14 +271,18 @@ pub enum CostResult {
|
||||
Existing(VmPayment),
|
||||
/// A new payment can be created with the specified amount
|
||||
New {
|
||||
/// The cost in milli-sats
|
||||
msats: u64,
|
||||
/// The cost
|
||||
amount: u64,
|
||||
/// Currency
|
||||
currency: Currency,
|
||||
/// The exchange rate used to calculate the price
|
||||
rate: f32,
|
||||
/// The time to extend the vm expiry in seconds
|
||||
time_value: u64,
|
||||
/// The absolute expiry time of the vm if renewed
|
||||
new_expiry: DateTime<Utc>,
|
||||
/// Taxes to charge
|
||||
tax: u64,
|
||||
},
|
||||
}
|
||||
|
||||
@ -212,8 +306,7 @@ impl PricingData {
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::mocks::{MockDb, MockExchangeRate};
|
||||
use lnvps_db::{DiskType, VmCustomPricing, VmCustomPricingDisk, VmCustomTemplate};
|
||||
const GB: u64 = 1024 * 1024 * 1024;
|
||||
use lnvps_db::{DiskType, User, VmCustomPricing, VmCustomPricingDisk, VmCustomTemplate};
|
||||
const MOCK_RATE: f32 = 100_000.0;
|
||||
|
||||
async fn add_custom_pricing(db: &MockDb) {
|
||||
@ -240,8 +333,8 @@ mod tests {
|
||||
VmCustomTemplate {
|
||||
id: 1,
|
||||
cpu: 2,
|
||||
memory: 2 * GB,
|
||||
disk_size: 80 * GB,
|
||||
memory: 2 * crate::GB,
|
||||
disk_size: 80 * crate::GB,
|
||||
disk_type: DiskType::SSD,
|
||||
disk_interface: Default::default(),
|
||||
pricing_id: 1,
|
||||
@ -287,19 +380,80 @@ mod tests {
|
||||
{
|
||||
let mut v = db.vms.lock().await;
|
||||
v.insert(1, MockDb::mock_vm());
|
||||
v.insert(
|
||||
2,
|
||||
Vm {
|
||||
user_id: 2,
|
||||
..MockDb::mock_vm()
|
||||
},
|
||||
);
|
||||
|
||||
let mut u = db.users.lock().await;
|
||||
u.insert(
|
||||
1,
|
||||
User {
|
||||
id: 1,
|
||||
pubkey: vec![],
|
||||
country_code: Some("USA".to_string()),
|
||||
..Default::default()
|
||||
},
|
||||
);
|
||||
u.insert(
|
||||
2,
|
||||
User {
|
||||
id: 2,
|
||||
pubkey: vec![],
|
||||
country_code: Some("IRL".to_string()),
|
||||
..Default::default()
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
let db: Arc<dyn LNVpsDb> = Arc::new(db);
|
||||
|
||||
let pe = PricingEngine::new(db.clone(), rates);
|
||||
let price = pe.get_vm_cost(1).await?;
|
||||
let taxes = HashMap::from([(CountryCode::IRL, 23.0)]);
|
||||
|
||||
let pe = PricingEngine::new(db.clone(), rates, taxes);
|
||||
let plan = MockDb::mock_cost_plan();
|
||||
|
||||
let price = pe.get_vm_cost(1, PaymentMethod::Lightning).await?;
|
||||
match price {
|
||||
CostResult::Existing(_) => bail!("??"),
|
||||
CostResult::New { msats, .. } => {
|
||||
CostResult::New { amount, tax, .. } => {
|
||||
let expect_price = (plan.amount / MOCK_RATE * 1.0e11) as u64;
|
||||
assert_eq!(expect_price, msats);
|
||||
assert_eq!(expect_price, amount);
|
||||
assert_eq!(0, tax);
|
||||
}
|
||||
_ => bail!("??"),
|
||||
}
|
||||
|
||||
// with taxes
|
||||
let price = pe.get_vm_cost(2, PaymentMethod::Lightning).await?;
|
||||
match price {
|
||||
CostResult::New { amount, tax, .. } => {
|
||||
let expect_price = (plan.amount / MOCK_RATE * 1.0e11) as u64;
|
||||
assert_eq!(expect_price, amount);
|
||||
assert_eq!((expect_price as f64 * 0.23).floor() as u64, tax);
|
||||
}
|
||||
_ => bail!("??"),
|
||||
}
|
||||
|
||||
// from amount
|
||||
let price = pe
|
||||
.get_cost_by_amount(1, CurrencyAmount::millisats(1000), PaymentMethod::Lightning)
|
||||
.await?;
|
||||
// full month price in msats
|
||||
let mo_price = (plan.amount / MOCK_RATE * 1.0e11) as u64;
|
||||
let time_scale = 1000f64 / mo_price as f64;
|
||||
let vm = db.get_vm(1).await?;
|
||||
let next_expire = PricingEngine::next_template_expire(&vm, &plan);
|
||||
match price {
|
||||
CostResult::New { amount, time_value, tax, .. } => {
|
||||
let expect_time = (next_expire as f64 * time_scale) as u64;
|
||||
assert_eq!(expect_time, time_value);
|
||||
assert_eq!(0, tax);
|
||||
assert_eq!(amount, 1000);
|
||||
}
|
||||
_ => bail!("??"),
|
||||
}
|
||||
|
||||
Ok(())
|
@ -1,6 +1,6 @@
|
||||
use crate::json_api::JsonApi;
|
||||
use crate::router::{ArpEntry, Router};
|
||||
use anyhow::{ensure, Result};
|
||||
use anyhow::{ensure, Context, Result};
|
||||
use base64::engine::general_purpose::STANDARD;
|
||||
use base64::Engine;
|
||||
use log::debug;
|
||||
@ -19,23 +19,28 @@ impl MikrotikRouter {
|
||||
STANDARD.encode(format!("{}:{}", username, password))
|
||||
);
|
||||
Self {
|
||||
api: JsonApi::token(url, &auth).unwrap(),
|
||||
api: JsonApi::token(url, &auth, true).unwrap(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Router for MikrotikRouter {
|
||||
async fn generate_mac(&self, _ip: &str, _comment: &str) -> Result<Option<ArpEntry>> {
|
||||
// Mikrotik router doesn't care what MAC address you use
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
async fn list_arp_entry(&self) -> Result<Vec<ArpEntry>> {
|
||||
let rsp: Vec<MikrotikArpEntry> = self.api.req(Method::GET, "/rest/ip/arp", ()).await?;
|
||||
Ok(rsp.into_iter().map(|e| e.into()).collect())
|
||||
Ok(rsp.into_iter().filter_map(|e| e.try_into().ok()).collect())
|
||||
}
|
||||
|
||||
async fn add_arp_entry(&self, entry: &ArpEntry) -> Result<ArpEntry> {
|
||||
let req: MikrotikArpEntry = entry.clone().into();
|
||||
let rsp: MikrotikArpEntry = self.api.req(Method::PUT, "/rest/ip/arp", req).await?;
|
||||
debug!("{:?}", rsp);
|
||||
Ok(rsp.into())
|
||||
Ok(rsp.try_into()?)
|
||||
}
|
||||
|
||||
async fn remove_arp_entry(&self, id: &str) -> Result<()> {
|
||||
@ -59,12 +64,12 @@ impl Router for MikrotikRouter {
|
||||
)
|
||||
.await?;
|
||||
debug!("{:?}", rsp);
|
||||
Ok(rsp.into())
|
||||
Ok(rsp.try_into()?)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct MikrotikArpEntry {
|
||||
struct MikrotikArpEntry {
|
||||
#[serde(rename = ".id")]
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub id: Option<String>,
|
||||
@ -77,15 +82,17 @@ pub struct MikrotikArpEntry {
|
||||
pub comment: Option<String>,
|
||||
}
|
||||
|
||||
impl From<MikrotikArpEntry> for ArpEntry {
|
||||
fn from(val: MikrotikArpEntry) -> Self {
|
||||
ArpEntry {
|
||||
id: val.id,
|
||||
address: val.address,
|
||||
mac_address: val.mac_address.unwrap(),
|
||||
interface: Some(val.interface),
|
||||
comment: val.comment,
|
||||
}
|
||||
impl TryFrom<MikrotikArpEntry> for ArpEntry {
|
||||
type Error = anyhow::Error;
|
||||
|
||||
fn try_from(value: MikrotikArpEntry) -> std::result::Result<Self, Self::Error> {
|
||||
Ok(ArpEntry {
|
||||
id: value.id,
|
||||
address: value.address,
|
||||
mac_address: value.mac_address.context("Mac address is empty")?,
|
||||
interface: Some(value.interface),
|
||||
comment: value.comment,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@ -1,4 +1,4 @@
|
||||
use anyhow::Result;
|
||||
use anyhow::{ensure, Result};
|
||||
use lnvps_db::{Vm, VmIpAssignment};
|
||||
use rocket::async_trait;
|
||||
|
||||
@ -11,6 +11,8 @@ use rocket::async_trait;
|
||||
/// It also prevents people from re-assigning their IP to another in the range,
|
||||
#[async_trait]
|
||||
pub trait Router: Send + Sync {
|
||||
/// Generate mac address for a given IP address
|
||||
async fn generate_mac(&self, ip: &str, comment: &str) -> Result<Option<ArpEntry>>;
|
||||
async fn list_arp_entry(&self) -> Result<Vec<ArpEntry>>;
|
||||
async fn add_arp_entry(&self, entry: &ArpEntry) -> Result<ArpEntry>;
|
||||
async fn remove_arp_entry(&self, id: &str) -> Result<()>;
|
||||
@ -28,6 +30,10 @@ pub struct ArpEntry {
|
||||
|
||||
impl ArpEntry {
|
||||
pub fn new(vm: &Vm, ip: &VmIpAssignment, interface: Option<String>) -> Result<Self> {
|
||||
ensure!(
|
||||
vm.mac_address != "ff:ff:ff:ff:ff:ff",
|
||||
"MAC address is invalid because its blank"
|
||||
);
|
||||
Ok(Self {
|
||||
id: ip.arp_ref.clone(),
|
||||
address: ip.ip.clone(),
|
||||
@ -40,5 +46,8 @@ impl ArpEntry {
|
||||
|
||||
#[cfg(feature = "mikrotik")]
|
||||
mod mikrotik;
|
||||
mod ovh;
|
||||
|
||||
#[cfg(feature = "mikrotik")]
|
||||
pub use mikrotik::*;
|
||||
pub use mikrotik::MikrotikRouter;
|
||||
pub use ovh::OvhDedicatedServerVMacRouter;
|
355 lnvps_api/src/router/ovh.rs (new file)
@ -0,0 +1,355 @@
|
||||
use crate::json_api::{JsonApi, TokenGen};
|
||||
use crate::router::{ArpEntry, Router};
|
||||
use anyhow::{anyhow, bail, Context, Result};
|
||||
use chrono::{DateTime, Utc};
|
||||
use lnvps_db::async_trait;
|
||||
use log::{info, warn};
|
||||
use nostr::hashes::{sha1, Hash};
|
||||
use nostr::Url;
|
||||
use reqwest::header::{HeaderName, HeaderValue, ACCEPT};
|
||||
use reqwest::{Method, RequestBuilder};
|
||||
use rocket::form::validate::Contains;
|
||||
use rocket::serde::Deserialize;
|
||||
use serde::Serialize;
|
||||
use std::ops::Sub;
|
||||
use std::str::FromStr;
|
||||
use std::sync::atomic::AtomicI64;
|
||||
use std::sync::Arc;
|
||||
|
||||
/// This router is not really a router, but it allows
|
||||
/// managing the virtual MACs for additional IPs on OVH dedicated servers
|
||||
pub struct OvhDedicatedServerVMacRouter {
|
||||
name: String,
|
||||
api: JsonApi,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
struct OvhTokenGen {
|
||||
time_delta: i64,
|
||||
application_key: String,
|
||||
application_secret: String,
|
||||
consumer_key: String,
|
||||
}
|
||||
|
||||
impl OvhTokenGen {
|
||||
pub fn new(time_delta: i64, token: &str) -> Result<Self> {
|
||||
let mut t_split = token.split(":");
|
||||
Ok(Self {
|
||||
time_delta,
|
||||
application_key: t_split
|
||||
.next()
|
||||
.context("Missing application_key")?
|
||||
.to_string(),
|
||||
application_secret: t_split
|
||||
.next()
|
||||
.context("Missing application_secret")?
|
||||
.to_string(),
|
||||
consumer_key: t_split.next().context("Missing consumer_key")?.to_string(),
|
||||
})
|
||||
}
|
||||
|
||||
    /// Compute signature for OVH.
    fn build_sig(
        method: &str,
        query: &str,
        body: &str,
        timestamp: &str,
        aas: &str,
        ck: &str,
    ) -> String {
        let sep = "+";
        let prefix = "$1$".to_string();

        let capacity = 1
            + aas.len()
            + sep.len()
            + ck.len()
            + method.len()
            + sep.len()
            + query.len()
            + sep.len()
            + body.len()
            + sep.len()
            + timestamp.len();
        let mut signature = String::with_capacity(capacity);
        signature.push_str(aas);
        signature.push_str(sep);
        signature.push_str(ck);
        signature.push_str(sep);
        signature.push_str(method);
        signature.push_str(sep);
        signature.push_str(query);
        signature.push_str(sep);
        signature.push_str(body);
        signature.push_str(sep);
        signature.push_str(timestamp);

        // debug!("Signature: {}", &signature);
        let sha1: sha1::Hash = Hash::hash(signature.as_bytes());
        let sig = hex::encode(sha1);
        prefix + &sig
    }
}
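// Sketch of the string that build_sig hashes, using placeholder credentials and URL;
// the resulting header value is "$1$" followed by the hex SHA-1 of this string.
fn ovh_signature_example() {
    let (aas, ck) = ("app-secret", "consumer-key");
    let (method, query, body, ts) = ("GET", "https://eu.api.ovh.com/v1/auth/time", "", "1700000000");
    let to_sign = format!("{}+{}+{}+{}+{}+{}", aas, ck, method, query, body, ts);
    assert_eq!(
        to_sign,
        "app-secret+consumer-key+GET+https://eu.api.ovh.com/v1/auth/time++1700000000"
    );
}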
|
||||
|
||||
impl TokenGen for OvhTokenGen {
|
||||
fn generate_token(
|
||||
&self,
|
||||
method: Method,
|
||||
url: &Url,
|
||||
body: Option<&str>,
|
||||
req: RequestBuilder,
|
||||
) -> Result<RequestBuilder> {
|
||||
let now = Utc::now().timestamp().sub(self.time_delta);
|
||||
let now_string = now.to_string();
|
||||
let sig = Self::build_sig(
|
||||
method.as_str(),
|
||||
url.as_str(),
|
||||
body.unwrap_or(""),
|
||||
now_string.as_str(),
|
||||
&self.application_secret,
|
||||
&self.consumer_key,
|
||||
);
|
||||
Ok(req
|
||||
.header("X-Ovh-Application", &self.application_key)
|
||||
.header("X-Ovh-Consumer", &self.consumer_key)
|
||||
.header("X-Ovh-Timestamp", now_string)
|
||||
.header("X-Ovh-Signature", sig))
|
||||
}
|
||||
}
|
||||
|
||||
impl OvhDedicatedServerVMacRouter {
|
||||
pub async fn new(url: &str, name: &str, token: &str) -> Result<Self> {
|
||||
// load API time delta
|
||||
let time_api = JsonApi::new(url)?;
|
||||
let time = time_api.get_raw("v1/auth/time").await?;
|
||||
let delta: i64 = Utc::now().timestamp().sub(time.parse::<i64>()?);
|
||||
|
||||
Ok(Self {
|
||||
name: name.to_string(),
|
||||
api: JsonApi::token_gen(url, false, OvhTokenGen::new(delta, token)?)?,
|
||||
})
|
||||
}
|
||||
|
||||
async fn get_task(&self, task_id: i64) -> Result<OvhTaskResponse> {
|
||||
self.api
|
||||
.get(&format!(
|
||||
"v1/dedicated/server/{}/task/{}",
|
||||
self.name, task_id
|
||||
))
|
||||
.await
|
||||
}
|
||||
|
||||
/// Poll a task until it completes
|
||||
async fn wait_for_task_result(&self, task_id: i64) -> Result<OvhTaskResponse> {
|
||||
loop {
|
||||
let status = self.get_task(task_id).await?;
|
||||
match status.status {
|
||||
OvhTaskStatus::Cancelled => {
|
||||
return Err(anyhow!(
|
||||
"Task was cancelled: {}",
|
||||
status.comment.unwrap_or_default()
|
||||
))
|
||||
}
|
||||
OvhTaskStatus::CustomerError => {
|
||||
return Err(anyhow!(
|
||||
"Task failed: {}",
|
||||
status.comment.unwrap_or_default()
|
||||
))
|
||||
}
|
||||
OvhTaskStatus::Done => return Ok(status),
|
||||
OvhTaskStatus::OvhError => {
|
||||
return Err(anyhow!(
|
||||
"Task failed: {}",
|
||||
status.comment.unwrap_or_default()
|
||||
))
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Router for OvhDedicatedServerVMacRouter {
|
||||
async fn generate_mac(&self, ip: &str, comment: &str) -> Result<Option<ArpEntry>> {
|
||||
info!("[OVH] Generating mac: {}={}", ip, comment);
|
||||
let rsp: OvhTaskResponse = self
|
||||
.api
|
||||
.post(
|
||||
&format!("v1/dedicated/server/{}/virtualMac", &self.name),
|
||||
OvhVMacRequest {
|
||||
ip_address: ip.to_string(),
|
||||
kind: OvhVMacType::Ovh,
|
||||
name: comment.to_string(),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
self.wait_for_task_result(rsp.task_id).await?;
|
||||
|
||||
// the API does not return the created entry, so look it up by IP address in the ARP entry list
|
||||
let e = self.list_arp_entry().await?;
|
||||
Ok(e.into_iter().find(|e| e.address == ip))
|
||||
}
|
||||
|
||||
async fn list_arp_entry(&self) -> Result<Vec<ArpEntry>> {
|
||||
let rsp: Vec<String> = self
|
||||
.api
|
||||
.get(&format!("v1/dedicated/server/{}/virtualMac", &self.name))
|
||||
.await?;
|
||||
|
||||
let mut ret = vec![];
|
||||
for mac in rsp {
|
||||
let rsp2: Vec<String> = self
|
||||
.api
|
||||
.get(&format!(
|
||||
"v1/dedicated/server/{}/virtualMac/{}/virtualAddress",
|
||||
&self.name, mac
|
||||
))
|
||||
.await?;
|
||||
|
||||
for addr in rsp2 {
|
||||
ret.push(ArpEntry {
|
||||
id: Some(format!("{}={}", mac, &addr)),
|
||||
address: addr,
|
||||
mac_address: mac.clone(),
|
||||
interface: None,
|
||||
comment: None,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
Ok(ret)
|
||||
}
|
||||
|
||||
async fn add_arp_entry(&self, entry: &ArpEntry) -> Result<ArpEntry> {
|
||||
info!(
|
||||
"[OVH] Adding mac ip: {} {}",
|
||||
entry.mac_address, entry.address
|
||||
);
|
||||
#[derive(Serialize)]
|
||||
struct AddVMacAddressRequest {
|
||||
#[serde(rename = "ipAddress")]
|
||||
pub ip_address: String,
|
||||
#[serde(rename = "virtualMachineName")]
|
||||
pub comment: String,
|
||||
}
|
||||
let id = format!("{}={}", &entry.mac_address, &entry.address);
|
||||
let task: OvhTaskResponse = self
|
||||
.api
|
||||
.post(
|
||||
&format!(
|
||||
"v1/dedicated/server/{}/virtualMac/{}/virtualAddress",
|
||||
&self.name, &entry.mac_address
|
||||
),
|
||||
AddVMacAddressRequest {
|
||||
ip_address: entry.address.clone(),
|
||||
comment: entry.comment.clone().unwrap_or(String::new()),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
self.wait_for_task_result(task.task_id).await?;
|
||||
|
||||
Ok(ArpEntry {
|
||||
id: Some(id),
|
||||
address: entry.address.clone(),
|
||||
mac_address: entry.mac_address.clone(),
|
||||
interface: None,
|
||||
comment: None,
|
||||
})
|
||||
}
|
||||
|
||||
async fn remove_arp_entry(&self, id: &str) -> Result<()> {
|
||||
let entries = self.list_arp_entry().await?;
|
||||
if let Some(this_entry) = entries.into_iter().find(|e| e.id == Some(id.to_string())) {
|
||||
info!(
|
||||
"[OVH] Deleting mac ip: {} {}",
|
||||
this_entry.mac_address, this_entry.address
|
||||
);
|
||||
let task: OvhTaskResponse = self
|
||||
.api
|
||||
.req(
|
||||
Method::DELETE,
|
||||
&format!(
|
||||
"v1/dedicated/server/{}/virtualMac/{}/virtualAddress/{}",
|
||||
self.name, this_entry.mac_address, this_entry.address
|
||||
),
|
||||
(),
|
||||
)
|
||||
.await?;
|
||||
self.wait_for_task_result(task.task_id).await?;
|
||||
Ok(())
|
||||
} else {
|
||||
bail!("Cannot remove arp entry, not found")
|
||||
}
|
||||
}
|
||||
|
||||
async fn update_arp_entry(&self, entry: &ArpEntry) -> Result<ArpEntry> {
|
||||
// cannot patch a virtual MAC entry; just return it unchanged
|
||||
warn!("[OVH] Updating virtual mac is not supported");
|
||||
Ok(entry.clone())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize)]
|
||||
struct OvhVMacRequest {
|
||||
#[serde(rename = "ipAddress")]
|
||||
pub ip_address: String,
|
||||
#[serde(rename = "type")]
|
||||
pub kind: OvhVMacType,
|
||||
#[serde(rename = "virtualMachineName")]
|
||||
pub name: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
enum OvhVMacType {
|
||||
Ovh,
|
||||
VMWare,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct OvhTaskResponse {
|
||||
pub comment: Option<String>,
|
||||
pub done_date: Option<DateTime<Utc>>,
|
||||
pub function: OvhTaskFunction,
|
||||
pub last_update: Option<DateTime<Utc>>,
|
||||
pub need_schedule: bool,
|
||||
pub note: Option<String>,
|
||||
pub planned_intervention_id: Option<i64>,
|
||||
pub start_date: DateTime<Utc>,
|
||||
pub status: OvhTaskStatus,
|
||||
pub tags: Option<Vec<KVSimple>>,
|
||||
pub task_id: i64,
|
||||
pub ticket_reference: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
struct KVSimple {
|
||||
pub key: Option<String>,
|
||||
pub value: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
enum OvhTaskStatus {
|
||||
Cancelled,
|
||||
CustomerError,
|
||||
Doing,
|
||||
Done,
|
||||
Init,
|
||||
OvhError,
|
||||
Todo,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
enum OvhTaskFunction {
|
||||
AddVirtualMac,
|
||||
MoveVirtualMac,
|
||||
VirtualMacAdd,
|
||||
VirtualMacDelete,
|
||||
RemoveVirtualMac,
|
||||
}
|
@ -1,20 +1,28 @@
|
||||
use crate::dns::DnsServer;
|
||||
use crate::exchange::ExchangeRateService;
|
||||
use crate::fiat::FiatPaymentService;
|
||||
use crate::lightning::LightningNode;
|
||||
use crate::provisioner::LNVpsProvisioner;
|
||||
use crate::router::Router;
|
||||
use anyhow::Result;
|
||||
use isocountry::CountryCode;
|
||||
use lnvps_db::LNVpsDb;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct Settings {
|
||||
/// Listen address for http server
|
||||
pub listen: Option<String>,
|
||||
|
||||
/// MYSQL connection string
|
||||
pub db: String,
|
||||
|
||||
/// Public URL mapping to this service
|
||||
pub public_url: String,
|
||||
|
||||
/// Lightning node config for creating LN payments
|
||||
pub lightning: LightningConfig,
|
||||
|
||||
@ -24,24 +32,27 @@ pub struct Settings {
|
||||
/// Provisioning profiles
|
||||
pub provisioner: ProvisionerConfig,
|
||||
|
||||
/// Network policy
|
||||
#[serde(default)]
|
||||
pub network_policy: NetworkPolicy,
|
||||
|
||||
/// Number of days after an expired VM is deleted
|
||||
pub delete_after: u16,
|
||||
|
||||
/// SMTP settings for sending emails
|
||||
pub smtp: Option<SmtpConfig>,
|
||||
|
||||
/// Network router config
|
||||
pub router: Option<RouterConfig>,
|
||||
|
||||
/// DNS configurations for PTR records
|
||||
pub dns: Option<DnsServerConfig>,
|
||||
|
||||
/// Nostr config for sending DMs
|
||||
pub nostr: Option<NostrConfig>,
|
||||
|
||||
/// Config for accepting revolut payments
|
||||
pub revolut: Option<RevolutConfig>,
|
||||
|
||||
#[serde(default)]
|
||||
/// Tax rates to charge per country as a percent of the amount
|
||||
pub tax_rate: HashMap<CountryCode, f32>,
|
||||
|
||||
/// public host of lnvps_nostr service
|
||||
pub nostr_address_host: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize)]
|
||||
@ -68,79 +79,55 @@ pub struct NostrConfig {
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub enum RouterConfig {
|
||||
Mikrotik {
|
||||
url: String,
|
||||
username: String,
|
||||
password: String,
|
||||
},
|
||||
pub struct DnsServerConfig {
|
||||
pub forward_zone_id: String,
|
||||
pub api: DnsServerApi,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub enum DnsServerConfig {
|
||||
pub enum DnsServerApi {
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
Cloudflare {
|
||||
token: String,
|
||||
forward_zone_id: String,
|
||||
reverse_zone_id: String,
|
||||
},
|
||||
}
|
||||
|
||||
/// Policy that determines how packets arrive at the VM
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, Default)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub enum NetworkAccessPolicy {
|
||||
/// No special procedure required for packets to arrive
|
||||
#[default]
|
||||
Auto,
|
||||
/// ARP entries are added statically on the access router
|
||||
StaticArp {
|
||||
/// Interface used to add arp entries
|
||||
interface: String,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, Default)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct NetworkPolicy {
|
||||
/// Policy that determines how packets arrive at the VM
|
||||
pub access: NetworkAccessPolicy,
|
||||
|
||||
/// Use SLAAC for IPv6 allocation
|
||||
pub ip6_slaac: Option<bool>,
|
||||
Cloudflare { token: String },
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize)]
|
||||
pub struct SmtpConfig {
|
||||
/// Admin user id, for sending system notifications
|
||||
pub admin: Option<u64>,
|
||||
|
||||
/// Email server host:port
|
||||
pub server: String,
|
||||
|
||||
/// From header to use, otherwise empty
|
||||
pub from: Option<String>,
|
||||
|
||||
/// Username for SMTP connection
|
||||
pub username: String,
|
||||
|
||||
/// Password for SMTP connection
|
||||
pub password: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub enum ProvisionerConfig {
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
Proxmox {
|
||||
/// Generic VM configuration
|
||||
qemu: QemuConfig,
|
||||
/// SSH config for issuing commands via CLI
|
||||
ssh: Option<SshConfig>,
|
||||
/// MAC address prefix for NIC (e.g. bc:24:11)
|
||||
mac_prefix: Option<String>,
|
||||
},
|
||||
pub struct ProvisionerConfig {
|
||||
pub proxmox: Option<ProxmoxConfig>,
|
||||
pub libvirt: Option<LibVirtConfig>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct ProxmoxConfig {
|
||||
/// Generic VM configuration
|
||||
pub qemu: QemuConfig,
|
||||
/// SSH config for issuing commands via CLI
|
||||
pub ssh: Option<SshConfig>,
|
||||
/// MAC address prefix for NIC (e.g. bc:24:11)
|
||||
pub mac_prefix: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct LibVirtConfig {
|
||||
/// Generic VM configuration
|
||||
pub qemu: QemuConfig,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize)]
|
||||
@ -162,10 +149,19 @@ pub struct QemuConfig {
|
||||
pub bridge: String,
|
||||
/// CPU type
|
||||
pub cpu: String,
|
||||
/// VLAN tag applied to all spawned VMs
|
||||
pub vlan: Option<u16>,
|
||||
/// Enable virtualization inside VM
|
||||
pub kvm: bool,
|
||||
/// CPU architecture
|
||||
pub arch: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct RevolutConfig {
|
||||
pub url: Option<String>,
|
||||
pub api_version: String,
|
||||
pub token: String,
|
||||
pub public_key: String,
|
||||
}
|
||||
|
||||
impl Settings {
|
||||
@ -178,32 +174,6 @@ impl Settings {
|
||||
Arc::new(LNVpsProvisioner::new(self.clone(), db, node, exchange))
|
||||
}
|
||||
|
||||
pub fn get_router(&self) -> Result<Option<Arc<dyn Router>>> {
|
||||
#[cfg(test)]
|
||||
{
|
||||
if let Some(_router) = &self.router {
|
||||
let router = crate::mocks::MockRouter::new(self.network_policy.clone());
|
||||
Ok(Some(Arc::new(router)))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
#[cfg(not(test))]
|
||||
{
|
||||
match &self.router {
|
||||
#[cfg(feature = "mikrotik")]
|
||||
Some(RouterConfig::Mikrotik {
|
||||
url,
|
||||
username,
|
||||
password,
|
||||
}) => Ok(Some(Arc::new(crate::router::MikrotikRouter::new(
|
||||
url, username, password,
|
||||
)))),
|
||||
_ => Ok(None),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_dns(&self) -> Result<Option<Arc<dyn DnsServer>>> {
|
||||
#[cfg(test)]
|
||||
{
|
||||
@ -213,17 +183,63 @@ impl Settings {
|
||||
{
|
||||
match &self.dns {
|
||||
None => Ok(None),
|
||||
#[cfg(feature = "cloudflare")]
|
||||
Some(DnsServerConfig::Cloudflare {
|
||||
token,
|
||||
forward_zone_id,
|
||||
reverse_zone_id,
|
||||
}) => Ok(Some(Arc::new(crate::dns::Cloudflare::new(
|
||||
token,
|
||||
reverse_zone_id,
|
||||
forward_zone_id,
|
||||
)))),
|
||||
Some(c) => match &c.api {
|
||||
#[cfg(feature = "cloudflare")]
|
||||
DnsServerApi::Cloudflare { token } => {
|
||||
Ok(Some(Arc::new(crate::dns::Cloudflare::new(token))))
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_revolut(&self) -> Result<Option<Arc<dyn FiatPaymentService>>> {
|
||||
match &self.revolut {
|
||||
#[cfg(feature = "revolut")]
|
||||
Some(c) => Ok(Some(Arc::new(crate::fiat::RevolutApi::new(c.clone())?))),
|
||||
_ => Ok(None),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub fn mock_settings() -> Settings {
|
||||
Settings {
|
||||
listen: None,
|
||||
db: "".to_string(),
|
||||
public_url: "http://localhost:8000".to_string(),
|
||||
lightning: LightningConfig::LND {
|
||||
url: "".to_string(),
|
||||
cert: Default::default(),
|
||||
macaroon: Default::default(),
|
||||
},
|
||||
read_only: false,
|
||||
provisioner: ProvisionerConfig {
|
||||
proxmox: Some(ProxmoxConfig {
|
||||
qemu: QemuConfig {
|
||||
machine: "q35".to_string(),
|
||||
os_type: "l26".to_string(),
|
||||
bridge: "vmbr1".to_string(),
|
||||
cpu: "kvm64".to_string(),
|
||||
kvm: false,
|
||||
arch: "x86_64".to_string(),
|
||||
},
|
||||
ssh: None,
|
||||
mac_prefix: Some("ff:ff:ff".to_string()),
|
||||
}),
|
||||
libvirt: None,
|
||||
},
|
||||
delete_after: 0,
|
||||
smtp: None,
|
||||
dns: Some(DnsServerConfig {
|
||||
forward_zone_id: "mock-forward-zone-id".to_string(),
|
||||
api: DnsServerApi::Cloudflare {
|
||||
token: "abc".to_string(),
|
||||
},
|
||||
}),
|
||||
nostr: None,
|
||||
revolut: None,
|
||||
tax_rate: HashMap::from([(CountryCode::IRL, 23.0), (CountryCode::USA, 1.0)]),
|
||||
nostr_address_host: None,
|
||||
}
|
||||
}
|
@ -1,8 +1,8 @@
|
||||
use anyhow::Result;
|
||||
use anyhow::{anyhow, Result};
|
||||
use log::info;
|
||||
use ssh2::Channel;
|
||||
use std::io::Read;
|
||||
use std::path::PathBuf;
|
||||
use std::path::{Path, PathBuf};
|
||||
use tokio::net::{TcpStream, ToSocketAddrs};
|
||||
|
||||
pub struct SshClient {
|
||||
@ -34,6 +34,12 @@ impl SshClient {
|
||||
Ok(channel)
|
||||
}
|
||||
|
||||
pub fn tunnel_unix_socket(&mut self, remote_path: &Path) -> Result<Channel> {
|
||||
self.session
|
||||
.channel_direct_streamlocal(remote_path.to_str().unwrap(), None)
|
||||
.map_err(|e| anyhow!(e))
|
||||
}
|
||||
|
||||
pub async fn execute(&mut self, command: &str) -> Result<(i32, String)> {
|
||||
info!("Executing command: {}", command);
|
||||
let mut channel = self.session.channel_session()?;
|
@ -2,13 +2,13 @@ use crate::host::get_host_client;
|
||||
use crate::provisioner::LNVpsProvisioner;
|
||||
use crate::settings::{ProvisionerConfig, Settings, SmtpConfig};
|
||||
use crate::status::{VmRunningState, VmState, VmStateCache};
|
||||
use anyhow::Result;
|
||||
use anyhow::{bail, Result};
|
||||
use chrono::{DateTime, Datelike, Days, Utc};
|
||||
use lettre::message::{MessageBuilder, MultiPart};
|
||||
use lettre::transport::smtp::authentication::Credentials;
|
||||
use lettre::AsyncTransport;
|
||||
use lettre::{AsyncSmtpTransport, Tokio1Executor};
|
||||
use lnvps_db::{LNVpsDb, Vm};
|
||||
use lnvps_db::{LNVpsDb, Vm, VmHost};
|
||||
use log::{debug, error, info, warn};
|
||||
use nostr::{EventBuilder, PublicKey, ToBech32};
|
||||
use nostr_sdk::Client;
|
||||
@ -18,6 +18,8 @@ use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum WorkJob {
|
||||
/// Sync resources from hosts to database
|
||||
PatchHosts,
|
||||
/// Check all running VMs
|
||||
CheckVms,
|
||||
/// Check that a VM's status matches the database state
|
||||
@ -251,7 +253,7 @@ impl Worker {
|
||||
None,
|
||||
)
|
||||
.await?;
|
||||
c.send_event(ev).await?;
|
||||
c.send_event(&ev).await?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
@ -278,46 +280,97 @@ impl Worker {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn handle(&mut self) -> Result<()> {
|
||||
while let Some(job) = self.rx.recv().await {
|
||||
match &job {
|
||||
WorkJob::CheckVm { vm_id } => {
|
||||
let vm = self.db.get_vm(*vm_id).await?;
|
||||
if let Err(e) = self.check_vm(&vm).await {
|
||||
error!("Failed to check VM {}: {}", vm_id, e);
|
||||
self.queue_admin_notification(
|
||||
format!("Failed to check VM {}:\n{:?}\n{}", vm_id, &job, e),
|
||||
Some("Job Failed".to_string()),
|
||||
)?
|
||||
async fn patch_host(&self, host: &mut VmHost) -> Result<()> {
|
||||
let client = match get_host_client(host, &self.settings.provisioner_config) {
|
||||
Ok(h) => h,
|
||||
Err(e) => bail!("Failed to get host client: {} {}", host.name, e),
|
||||
};
|
||||
let info = client.get_info().await?;
|
||||
let needs_update = info.cpu != host.cpu || info.memory != host.memory;
|
||||
if needs_update {
|
||||
host.cpu = info.cpu;
|
||||
host.memory = info.memory;
|
||||
self.db.update_host(host).await?;
|
||||
info!(
|
||||
"Updated host {}: cpu={}, memory={}",
|
||||
host.name, host.cpu, host.memory
|
||||
);
|
||||
}
|
||||
|
||||
let mut host_disks = self.db.list_host_disks(host.id).await?;
|
||||
for disk in &info.disks {
|
||||
if let Some(mut hd) = host_disks.iter_mut().find(|d| d.name == disk.name) {
|
||||
if hd.size != disk.size {
|
||||
hd.size = disk.size;
|
||||
self.db.update_host_disk(hd).await?;
|
||||
info!(
|
||||
"Updated host disk {}: size={},type={},interface={}",
|
||||
hd.name, hd.size, hd.kind, hd.interface
|
||||
);
|
||||
}
|
||||
} else {
|
||||
warn!("Un-mapped host disk {}", disk.name);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn try_job(&mut self, job: &WorkJob) -> Result<()> {
|
||||
match job {
|
||||
WorkJob::PatchHosts => {
|
||||
let mut hosts = self.db.list_hosts().await?;
|
||||
for mut host in &mut hosts {
|
||||
info!("Patching host {}", host.name);
|
||||
if let Err(e) = self.patch_host(&mut host).await {
|
||||
error!("Failed to patch host {}: {}", host.name, e);
|
||||
}
|
||||
}
|
||||
WorkJob::SendNotification {
|
||||
user_id,
|
||||
message,
|
||||
title,
|
||||
} => {
|
||||
if let Err(e) = self
|
||||
.send_notification(*user_id, message.clone(), title.clone())
|
||||
.await
|
||||
{
|
||||
error!("Failed to send notification {}: {}", user_id, e);
|
||||
self.queue_admin_notification(
|
||||
format!("Failed to send notification:\n{:?}\n{}", &job, e),
|
||||
Some("Job Failed".to_string()),
|
||||
)?
|
||||
}
|
||||
}
|
||||
WorkJob::CheckVm { vm_id } => {
|
||||
let vm = self.db.get_vm(*vm_id).await?;
|
||||
if let Err(e) = self.check_vm(&vm).await {
|
||||
error!("Failed to check VM {}: {}", vm_id, e);
|
||||
self.queue_admin_notification(
|
||||
format!("Failed to check VM {}:\n{:?}\n{}", vm_id, &job, e),
|
||||
Some("Job Failed".to_string()),
|
||||
)?
|
||||
}
|
||||
WorkJob::CheckVms => {
|
||||
if let Err(e) = self.check_vms().await {
|
||||
error!("Failed to check VMs: {}", e);
|
||||
self.queue_admin_notification(
|
||||
format!("Failed to check VM's:\n{:?}\n{}", &job, e),
|
||||
Some("Job Failed".to_string()),
|
||||
)?
|
||||
}
|
||||
}
|
||||
WorkJob::SendNotification {
|
||||
user_id,
|
||||
message,
|
||||
title,
|
||||
} => {
|
||||
if let Err(e) = self
|
||||
.send_notification(*user_id, message.clone(), title.clone())
|
||||
.await
|
||||
{
|
||||
error!("Failed to send notification {}: {}", user_id, e);
|
||||
self.queue_admin_notification(
|
||||
format!("Failed to send notification:\n{:?}\n{}", &job, e),
|
||||
Some("Job Failed".to_string()),
|
||||
)?
|
||||
}
|
||||
}
|
||||
WorkJob::CheckVms => {
|
||||
if let Err(e) = self.check_vms().await {
|
||||
error!("Failed to check VMs: {}", e);
|
||||
self.queue_admin_notification(
|
||||
format!("Failed to check VM's:\n{:?}\n{}", &job, e),
|
||||
Some("Job Failed".to_string()),
|
||||
)?
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn handle(&mut self) -> Result<()> {
|
||||
while let Some(job) = self.rx.recv().await {
|
||||
if let Err(e) = self.try_job(&job).await {
|
||||
error!("Job failed to execute: {:?} {}", job, e);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
7
lnvps_common/Cargo.toml
Normal file
@ -0,0 +1,7 @@
|
||||
[package]
|
||||
name = "lnvps_common"
|
||||
version = "0.1.0"
|
||||
edition = "2024"
|
||||
|
||||
[dependencies]
|
||||
rocket.workspace = true
|
@ -1,8 +1,9 @@
|
||||
use rocket::fairing::{Fairing, Info, Kind};
|
||||
use rocket::http::{Header, Method, Status};
|
||||
use rocket::{Request, Response};
|
||||
use std::io::Cursor;
|
||||
use rocket::http::Header;
|
||||
use rocket::route::{Handler, Outcome};
|
||||
use rocket::{Data, Request, Response};
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct CORS;
|
||||
|
||||
#[rocket::async_trait]
|
||||
@ -14,7 +15,7 @@ impl Fairing for CORS {
|
||||
}
|
||||
}
|
||||
|
||||
async fn on_response<'r>(&self, req: &'r Request<'_>, response: &mut Response<'r>) {
|
||||
async fn on_response<'r>(&self, _req: &'r Request<'_>, response: &mut Response<'r>) {
|
||||
response.set_header(Header::new("Access-Control-Allow-Origin", "*"));
|
||||
response.set_header(Header::new(
|
||||
"Access-Control-Allow-Methods",
|
||||
@ -22,11 +23,12 @@ impl Fairing for CORS {
|
||||
));
|
||||
response.set_header(Header::new("Access-Control-Allow-Headers", "*"));
|
||||
response.set_header(Header::new("Access-Control-Allow-Credentials", "true"));
|
||||
|
||||
// force status 200 for options requests
|
||||
if req.method() == Method::Options {
|
||||
response.set_status(Status::Ok);
|
||||
response.set_sized_body(None, Cursor::new(""))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[rocket::async_trait]
|
||||
impl Handler for CORS {
|
||||
async fn handle<'r>(&self, _request: &'r Request<'_>, _data: Data<'r>) -> Outcome<'r> {
|
||||
Outcome::Success(Response::new())
|
||||
}
|
||||
}
|
2
lnvps_common/src/lib.rs
Normal file
@ -0,0 +1,2 @@
|
||||
pub mod cors;
|
||||
pub use cors::*;
|
455
lnvps_db/Cargo.lock
generated
File diff suppressed because it is too large
@ -6,9 +6,10 @@ edition = "2021"
|
||||
[features]
|
||||
default = ["mysql"]
|
||||
mysql = ["sqlx/mysql"]
|
||||
nostr-domain = []
|
||||
|
||||
[dependencies]
|
||||
anyhow = "1.0.83"
|
||||
anyhow.workspace = true
|
||||
sqlx = { version = "0.8.2", features = ["chrono", "migrate", "runtime-tokio"] }
|
||||
chrono = { version = "0.4.38", features = ["serde"] }
|
||||
async-trait = "0.1.83"
|
||||
|
6
lnvps_db/migrations/20250310153305_fiat_payment.sql
Normal file
@ -0,0 +1,6 @@
|
||||
alter table vm_payment
|
||||
add column currency varchar(5) not null default 'BTC',
|
||||
add column payment_method smallint unsigned not null default 0,
|
||||
add column external_id varchar(255),
|
||||
change invoice external_data varchar (4096) NOT NULL,
|
||||
drop column settle_index;
|
4
lnvps_db/migrations/20250311135933_taxes.sql
Normal file
@ -0,0 +1,4 @@
|
||||
alter table vm_payment
|
||||
add column tax bigint unsigned not null;
|
||||
alter table users
|
||||
add column country_code varchar(3) not null default 'USA';
|
5
lnvps_db/migrations/20250313140640_empty_country.sql
Normal file
@ -0,0 +1,5 @@
|
||||
alter table users
|
||||
change column country_code country_code varchar (3);
|
||||
-- assume country_code was not actually set until now
|
||||
update users
|
||||
set country_code = null;
|
5
lnvps_db/migrations/20250324143556_load_factors.sql
Normal file
@ -0,0 +1,5 @@
|
||||
-- Add migration script here
|
||||
alter table vm_host
|
||||
add column load_memory float not null default 1.0,
|
||||
add column load_disk float not null default 1.0,
|
||||
change column load_factor load_cpu float not null default 1.0
|
23
lnvps_db/migrations/20250325113115_extend_ip_range.sql
Normal file
@ -0,0 +1,23 @@
|
||||
create table router
|
||||
(
|
||||
id integer unsigned not null auto_increment primary key,
|
||||
name varchar(100) not null,
|
||||
enabled bit(1) not null,
|
||||
kind smallint unsigned not null,
|
||||
url varchar(255) not null,
|
||||
token varchar(128) not null
|
||||
);
|
||||
create table access_policy
|
||||
(
|
||||
id integer unsigned not null auto_increment primary key,
|
||||
name varchar(100) not null,
|
||||
kind smallint unsigned not null,
|
||||
router_id integer unsigned,
|
||||
interface varchar(100),
|
||||
constraint fk_access_policy_router foreign key (router_id) references router (id)
|
||||
);
|
||||
alter table ip_range
|
||||
add column reverse_zone_id varchar(255),
|
||||
add column access_policy_id integer unsigned;
|
||||
alter table ip_range
|
||||
add constraint fk_ip_range_access_policy foreign key (access_policy_id) references access_policy (id);
|
@ -0,0 +1,2 @@
|
||||
alter table vm_host
|
||||
add column vlan_id integer unsigned;
|
@ -0,0 +1,3 @@
|
||||
alter table ip_range
|
||||
add column allocation_mode smallint unsigned not null default 0,
|
||||
add column use_full_range bit(1) not null;
|
4
lnvps_db/migrations/20250328220956_fixes.sql
Normal file
@ -0,0 +1,4 @@
|
||||
-- Add migration script here
|
||||
ALTER TABLE vm_ip_assignment DROP KEY ix_vm_ip_assignment_ip;
|
||||
alter table vm_os_image
|
||||
add column default_username varchar(50);
|
25
lnvps_db/migrations/20250402115943_nostr_address.sql
Normal file
@ -0,0 +1,25 @@
|
||||
-- Add migration script here
|
||||
create table nostr_domain
|
||||
(
|
||||
id integer unsigned not null auto_increment primary key,
|
||||
owner_id integer unsigned not null,
|
||||
name varchar(200) not null,
|
||||
enabled bit(1) not null default 0,
|
||||
created timestamp not null default current_timestamp,
|
||||
relays varchar(1024),
|
||||
|
||||
unique key ix_domain_unique (name),
|
||||
constraint fk_nostr_domain_user foreign key (owner_id) references users (id)
|
||||
);
|
||||
create table nostr_domain_handle
|
||||
(
|
||||
id integer unsigned not null auto_increment primary key,
|
||||
domain_id integer unsigned not null,
|
||||
handle varchar(100) not null,
|
||||
created timestamp not null default current_timestamp,
|
||||
pubkey binary(32) not null,
|
||||
relays varchar(1024),
|
||||
|
||||
unique key ix_domain_handle_unique (domain_id, handle),
|
||||
constraint fk_nostr_domain_handle_domain foreign key (domain_id) references nostr_domain (id) on delete cascade
|
||||
)
|
9
lnvps_db/migrations/20250501132143_billing_info.sql
Normal file
@ -0,0 +1,9 @@
|
||||
-- Add migration script here
|
||||
alter table users
|
||||
add column billing_name varchar(200),
|
||||
add column billing_address_1 varchar(200),
|
||||
add column billing_address_2 varchar(200),
|
||||
add column billing_city varchar(100),
|
||||
add column billing_state varchar(100),
|
||||
add column billing_postcode varchar(50),
|
||||
add column billing_tax_id varchar(50);
|
19
lnvps_db/migrations/20250501162308_company_info.sql
Normal file
@ -0,0 +1,19 @@
|
||||
-- Add migration script here
|
||||
create table company
|
||||
(
|
||||
id integer unsigned not null auto_increment primary key,
|
||||
created timestamp not null default current_timestamp,
|
||||
name varchar(100) not null,
|
||||
email varchar(100) not null,
|
||||
phone varchar(100),
|
||||
address_1 varchar(200),
|
||||
address_2 varchar(200),
|
||||
city varchar(100),
|
||||
state varchar(100),
|
||||
postcode varchar(50),
|
||||
country_code varchar(3),
|
||||
tax_id varchar(50)
|
||||
);
|
||||
alter table vm_host_region
|
||||
add column company_id integer unsigned,
|
||||
add constraint fk_host_region_company foreign key (company_id) references company (id);
|
@ -10,7 +10,7 @@ pub use mysql::*;
|
||||
pub use async_trait::async_trait;
|
||||
|
||||
#[async_trait]
|
||||
pub trait LNVpsDb: Sync + Send {
|
||||
pub trait LNVpsDb: LNVPSNostrDb + Send + Sync {
|
||||
/// Migrate database
|
||||
async fn migrate(&self) -> Result<()>;
|
||||
|
||||
@ -38,9 +38,15 @@ pub trait LNVpsDb: Sync + Send {
|
||||
/// List a user's SSH keys
|
||||
async fn list_user_ssh_key(&self, user_id: u64) -> Result<Vec<UserSshKey>>;
|
||||
|
||||
/// List VM host regions
|
||||
async fn list_host_region(&self) -> Result<Vec<VmHostRegion>>;
|
||||
|
||||
/// Get VM host region by id
|
||||
async fn get_host_region(&self, id: u64) -> Result<VmHostRegion>;
|
||||
|
||||
/// Get VM host region by name
|
||||
async fn get_host_region_by_name(&self, name: &str) -> Result<VmHostRegion>;
|
||||
|
||||
/// List VM hosts
|
||||
async fn list_hosts(&self) -> Result<Vec<VmHost>>;
|
||||
|
||||
@ -56,6 +62,9 @@ pub trait LNVpsDb: Sync + Send {
|
||||
/// Get a specific host disk
|
||||
async fn get_host_disk(&self, disk_id: u64) -> Result<VmHostDisk>;
|
||||
|
||||
/// Update a host disk
|
||||
async fn update_host_disk(&self, disk: &VmHostDisk) -> Result<()>;
|
||||
|
||||
/// Get OS image by id
|
||||
async fn get_os_image(&self, id: u64) -> Result<VmOsImage>;
|
||||
|
||||
@ -131,6 +140,9 @@ pub trait LNVpsDb: Sync + Send {
|
||||
/// Get VM payment by payment id
|
||||
async fn get_vm_payment(&self, id: &Vec<u8>) -> Result<VmPayment>;
|
||||
|
||||
/// Get VM payment by external id
|
||||
async fn get_vm_payment_by_ext_id(&self, id: &str) -> Result<VmPayment>;
|
||||
|
||||
/// Update a VM payment record
|
||||
async fn update_vm_payment(&self, vm_payment: &VmPayment) -> Result<()>;
|
||||
|
||||
@ -154,4 +166,50 @@ pub trait LNVpsDb: Sync + Send {
|
||||
|
||||
/// Return the list of disk prices for a given custom pricing model
|
||||
async fn list_custom_pricing_disk(&self, pricing_id: u64) -> Result<Vec<VmCustomPricingDisk>>;
|
||||
|
||||
/// Get router config
|
||||
async fn get_router(&self, router_id: u64) -> Result<Router>;
|
||||
|
||||
/// Get access policy
|
||||
async fn get_access_policy(&self, access_policy_id: u64) -> Result<AccessPolicy>;
|
||||
|
||||
/// Get company
|
||||
async fn get_company(&self, company_id: u64) -> Result<Company>;
|
||||
}
|
||||
|
||||
#[cfg(feature = "nostr-domain")]
|
||||
#[async_trait]
|
||||
pub trait LNVPSNostrDb: Sync + Send {
|
||||
/// Get single handle for a domain
|
||||
async fn get_handle(&self, handle_id: u64) -> Result<NostrDomainHandle>;
|
||||
|
||||
/// Get single handle for a domain
|
||||
async fn get_handle_by_name(&self, domain_id: u64, handle: &str) -> Result<NostrDomainHandle>;
|
||||
|
||||
/// Insert a new handle
|
||||
async fn insert_handle(&self, handle: &NostrDomainHandle) -> Result<u64>;
|
||||
|
||||
/// Update an existing domain handle
|
||||
async fn update_handle(&self, handle: &NostrDomainHandle) -> Result<()>;
|
||||
|
||||
/// Delete handle entry
|
||||
async fn delete_handle(&self, handle_id: u64) -> Result<()>;
|
||||
|
||||
/// List handles
|
||||
async fn list_handles(&self, domain_id: u64) -> Result<Vec<NostrDomainHandle>>;
|
||||
|
||||
/// Get domain object by id
|
||||
async fn get_domain(&self, id: u64) -> Result<NostrDomain>;
|
||||
|
||||
/// Get domain object by name
|
||||
async fn get_domain_by_name(&self, name: &str) -> Result<NostrDomain>;
|
||||
|
||||
/// List domains owned by a user
|
||||
async fn list_domains(&self, owner_id: u64) -> Result<Vec<NostrDomain>>;
|
||||
|
||||
/// Insert a new domain
|
||||
async fn insert_domain(&self, domain: &NostrDomain) -> Result<u64>;
|
||||
|
||||
/// Delete a domain
|
||||
async fn delete_domain(&self, domain_id: u64) -> Result<()>;
|
||||
}
|
||||
|
@ -1,11 +1,12 @@
|
||||
use anyhow::{anyhow, Result};
|
||||
use anyhow::{anyhow, bail, Result};
|
||||
use chrono::{DateTime, Utc};
|
||||
use sqlx::FromRow;
|
||||
use sqlx::{FromRow, Type};
|
||||
use std::fmt::{Display, Formatter};
|
||||
use std::path::PathBuf;
|
||||
use std::str::FromStr;
|
||||
use url::Url;
|
||||
|
||||
#[derive(FromRow, Clone, Debug)]
|
||||
#[derive(FromRow, Clone, Debug, Default)]
|
||||
/// Users who buy VMs
|
||||
pub struct User {
|
||||
/// Unique ID of this user (database generated)
|
||||
@ -20,6 +21,22 @@ pub struct User {
|
||||
pub contact_nip17: bool,
|
||||
/// If user should be contacted via email for notifications
|
||||
pub contact_email: bool,
|
||||
/// User's country
|
||||
pub country_code: Option<String>,
|
||||
/// Name to show on invoices
|
||||
pub billing_name: Option<String>,
|
||||
/// Billing address line 1
|
||||
pub billing_address_1: Option<String>,
|
||||
/// Billing address line 2
|
||||
pub billing_address_2: Option<String>,
|
||||
/// Billing city
|
||||
pub billing_city: Option<String>,
|
||||
/// Billing state/county
|
||||
pub billing_state: Option<String>,
|
||||
/// Billing postcode/zip
|
||||
pub billing_postcode: Option<String>,
|
||||
/// Billing tax id
|
||||
pub billing_tax_id: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(FromRow, Clone, Debug, Default)]
|
||||
@ -54,6 +71,7 @@ pub struct VmHostRegion {
|
||||
pub id: u64,
|
||||
pub name: String,
|
||||
pub enabled: bool,
|
||||
pub company_id: Option<u64>,
|
||||
}
|
||||
|
||||
#[derive(FromRow, Clone, Debug, Default)]
|
||||
@ -77,8 +95,14 @@ pub struct VmHost {
|
||||
pub enabled: bool,
|
||||
/// API token used to control this host via [ip]
|
||||
pub api_token: String,
|
||||
/// Load factor for provisioning
|
||||
pub load_factor: f32,
|
||||
/// CPU load factor for provisioning
|
||||
pub load_cpu: f32,
|
||||
/// Memory load factor
|
||||
pub load_memory: f32,
|
||||
/// Disk load factor
|
||||
pub load_disk: f32,
|
||||
/// VLAN id assigned to all VMs on the host
|
||||
pub vlan_id: Option<u64>,
|
||||
}
|
||||
|
||||
#[derive(FromRow, Clone, Debug, Default)]
|
||||
@ -100,6 +124,27 @@ pub enum DiskType {
|
||||
SSD = 1,
|
||||
}
|
||||
|
||||
impl FromStr for DiskType {
|
||||
type Err = anyhow::Error;
|
||||
|
||||
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
|
||||
match s.to_lowercase().as_str() {
|
||||
"hdd" => Ok(DiskType::HDD),
|
||||
"ssd" => Ok(DiskType::SSD),
|
||||
_ => Err(anyhow!("unknown disk type {}", s)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for DiskType {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
DiskType::HDD => write!(f, "hdd"),
|
||||
DiskType::SSD => write!(f, "ssd"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, sqlx::Type, Default, PartialEq, Eq)]
|
||||
#[repr(u16)]
|
||||
pub enum DiskInterface {
|
||||
@ -109,6 +154,29 @@ pub enum DiskInterface {
|
||||
PCIe = 2,
|
||||
}
|
||||
|
||||
impl FromStr for DiskInterface {
|
||||
type Err = anyhow::Error;
|
||||
|
||||
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
|
||||
match s.to_lowercase().as_str() {
|
||||
"sata" => Ok(DiskInterface::SATA),
|
||||
"scsi" => Ok(DiskInterface::SCSI),
|
||||
"pcie" => Ok(DiskInterface::PCIe),
|
||||
_ => Err(anyhow!("unknown disk interface {}", s)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for DiskInterface {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
DiskInterface::SATA => write!(f, "sata"),
|
||||
DiskInterface::SCSI => write!(f, "scsi"),
|
||||
DiskInterface::PCIe => write!(f, "pcie"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, sqlx::Type, Default, PartialEq, Eq)]
|
||||
#[repr(u16)]
|
||||
pub enum OsDistribution {
|
||||
@ -123,6 +191,24 @@ pub enum OsDistribution {
|
||||
RedHatEnterprise = 7,
|
||||
}
|
||||
|
||||
impl FromStr for OsDistribution {
|
||||
type Err = anyhow::Error;
|
||||
|
||||
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
|
||||
match s.to_lowercase().as_str() {
|
||||
"ubuntu" => Ok(OsDistribution::Ubuntu),
|
||||
"debian" => Ok(OsDistribution::Debian),
|
||||
"centos" => Ok(OsDistribution::CentOS),
|
||||
"fedora" => Ok(OsDistribution::Fedora),
|
||||
"freebsd" => Ok(OsDistribution::FreeBSD),
|
||||
"opensuse" => Ok(OsDistribution::OpenSUSE),
|
||||
"archlinux" => Ok(OsDistribution::ArchLinux),
|
||||
"redhatenterprise" => Ok(OsDistribution::RedHatEnterprise),
|
||||
_ => Err(anyhow!("unknown distribution {}", s)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// OS Images are templates which are used as a basis for
|
||||
/// provisioning new VMs
|
||||
#[derive(FromRow, Clone, Debug)]
|
||||
@ -135,6 +221,7 @@ pub struct VmOsImage {
|
||||
pub release_date: DateTime<Utc>,
|
||||
/// URL location of cloud image
|
||||
pub url: String,
|
||||
pub default_username: Option<String>,
|
||||
}
|
||||
|
||||
impl VmOsImage {
|
||||
@ -158,12 +245,68 @@ impl Display for VmOsImage {
|
||||
}
|
||||
|
||||
#[derive(FromRow, Clone, Debug)]
|
||||
pub struct Router {
|
||||
pub id: u64,
|
||||
pub name: String,
|
||||
pub enabled: bool,
|
||||
pub kind: RouterKind,
|
||||
pub url: String,
|
||||
pub token: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, sqlx::Type)]
|
||||
#[repr(u16)]
|
||||
pub enum RouterKind {
|
||||
/// Mikrotik router (JSON API)
|
||||
Mikrotik = 0,
|
||||
/// A pseudo-router which allows adding virtual MAC addresses to a dedicated server
|
||||
OvhAdditionalIp = 1,
|
||||
}
|
||||
|
||||
#[derive(FromRow, Clone, Debug, Default)]
|
||||
pub struct IpRange {
|
||||
pub id: u64,
|
||||
pub cidr: String,
|
||||
pub gateway: String,
|
||||
pub enabled: bool,
|
||||
pub region_id: u64,
|
||||
pub reverse_zone_id: Option<String>,
|
||||
pub access_policy_id: Option<u64>,
|
||||
pub allocation_mode: IpRangeAllocationMode,
|
||||
/// Use all IPs in the range, including first and last
|
||||
pub use_full_range: bool,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, sqlx::Type, Default)]
|
||||
#[repr(u16)]
|
||||
/// How ips are allocated from this range
|
||||
pub enum IpRangeAllocationMode {
|
||||
/// IPs are assigned in a random order
|
||||
Random = 0,
|
||||
#[default]
|
||||
/// IPs are assigned in sequential order
|
||||
Sequential = 1,
|
||||
/// IP(v6) assignment uses SLAAC EUI-64
|
||||
SlaacEui64 = 2,
|
||||
}
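As a reference for the SlaacEui64 variant above, a minimal sketch (an assumption, not code from this change) of deriving the EUI-64 interface identifier from a VM's MAC address; combined with the range's /64 prefix this yields the VM's IPv6 address.
// Sketch only: RFC 4291 modified EUI-64 derivation from a 48-bit MAC.
fn eui64_interface_id(mac: [u8; 6]) -> [u8; 8] {
    let mut id = [0u8; 8];
    id[..3].copy_from_slice(&mac[..3]); // OUI half of the MAC
    id[3] = 0xff;                       // insert ff:fe in the middle
    id[4] = 0xfe;
    id[5..].copy_from_slice(&mac[3..]); // device half of the MAC
    id[0] ^= 0x02;                      // flip the universal/local bit
    id
}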
|
||||
|
||||
#[derive(FromRow, Clone, Debug)]
|
||||
pub struct AccessPolicy {
|
||||
pub id: u64,
|
||||
pub name: String,
|
||||
pub kind: NetworkAccessPolicy,
|
||||
/// Router used to apply this network access policy
|
||||
pub router_id: Option<u64>,
|
||||
/// Interface name used to apply this policy
|
||||
pub interface: Option<String>,
|
||||
}
|
||||
|
||||
/// Policy that determines how packets arrive at the VM
|
||||
#[derive(Debug, Clone, sqlx::Type)]
|
||||
#[repr(u16)]
|
||||
pub enum NetworkAccessPolicy {
|
||||
/// ARP entries are added statically on the access router
|
||||
StaticArp = 0,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, sqlx::Type)]
|
||||
@ -309,17 +452,92 @@ impl Display for VmIpAssignment {
|
||||
|
||||
#[derive(FromRow, Clone, Debug, Default)]
|
||||
pub struct VmPayment {
|
||||
/// Payment hash
|
||||
pub id: Vec<u8>,
|
||||
pub vm_id: u64,
|
||||
pub created: DateTime<Utc>,
|
||||
pub expires: DateTime<Utc>,
|
||||
pub amount: u64,
|
||||
pub invoice: String,
|
||||
pub currency: String,
|
||||
pub payment_method: PaymentMethod,
|
||||
/// External data (invoice / json)
|
||||
pub external_data: String,
|
||||
/// External id on other system
|
||||
pub external_id: Option<String>,
|
||||
pub is_paid: bool,
|
||||
/// Exchange rate
|
||||
/// TODO: handle other base currencies
|
||||
/// Exchange rate back to base currency (EUR)
|
||||
pub rate: f32,
|
||||
/// Number of seconds this payment will add to vm expiry
|
||||
pub time_value: u64,
|
||||
pub settle_index: Option<u64>,
|
||||
/// Taxes to charge on payment
|
||||
pub tax: u64,
|
||||
}
|
||||
|
||||
#[derive(Type, Clone, Copy, Debug, Default, PartialEq)]
|
||||
#[repr(u16)]
|
||||
pub enum PaymentMethod {
|
||||
#[default]
|
||||
Lightning,
|
||||
Revolut,
|
||||
Paypal,
|
||||
}
|
||||
|
||||
impl Display for PaymentMethod {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
PaymentMethod::Lightning => write!(f, "Lightning"),
|
||||
PaymentMethod::Revolut => write!(f, "Revolut"),
|
||||
PaymentMethod::Paypal => write!(f, "PayPal"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl FromStr for PaymentMethod {
|
||||
type Err = anyhow::Error;
|
||||
|
||||
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
|
||||
match s {
|
||||
"lightning" => Ok(PaymentMethod::Lightning),
|
||||
"revolut" => Ok(PaymentMethod::Revolut),
|
||||
"paypal" => Ok(PaymentMethod::Paypal),
|
||||
_ => bail!("Unknown payment method: {}", s),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(FromRow, Clone, Debug, Default)]
|
||||
pub struct NostrDomain {
|
||||
pub id: u64,
|
||||
pub owner_id: u64,
|
||||
pub name: String,
|
||||
pub created: DateTime<Utc>,
|
||||
pub enabled: bool,
|
||||
pub relays: Option<String>,
|
||||
pub handles: i64,
|
||||
}
|
||||
|
||||
#[derive(FromRow, Clone, Debug, Default)]
|
||||
pub struct NostrDomainHandle {
|
||||
pub id: u64,
|
||||
pub domain_id: u64,
|
||||
pub handle: String,
|
||||
pub created: DateTime<Utc>,
|
||||
pub pubkey: Vec<u8>,
|
||||
pub relays: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(FromRow, Clone, Debug, Default)]
|
||||
pub struct Company {
|
||||
pub id: u64,
|
||||
pub created: DateTime<Utc>,
|
||||
pub name: String,
|
||||
pub address_1: Option<String>,
|
||||
pub address_2: Option<String>,
|
||||
pub city: Option<String>,
|
||||
pub state: Option<String>,
|
||||
pub country_code: Option<String>,
|
||||
pub tax_id: Option<String>,
|
||||
pub postcode: Option<String>,
|
||||
pub phone: Option<String>,
|
||||
pub email: Option<String>,
|
||||
}
|
||||
|
@ -1,8 +1,4 @@
|
||||
use crate::{
|
||||
IpRange, LNVpsDb, User, UserSshKey, Vm, VmCostPlan, VmCustomPricing, VmCustomPricingDisk,
|
||||
VmCustomTemplate, VmHost, VmHostDisk, VmHostRegion, VmIpAssignment, VmOsImage, VmPayment,
|
||||
VmTemplate,
|
||||
};
|
||||
use crate::{AccessPolicy, Company, IpRange, LNVPSNostrDb, LNVpsDb, NostrDomain, NostrDomainHandle, Router, User, UserSshKey, Vm, VmCostPlan, VmCustomPricing, VmCustomPricingDisk, VmCustomTemplate, VmHost, VmHostDisk, VmHostRegion, VmIpAssignment, VmOsImage, VmPayment, VmTemplate};
|
||||
use anyhow::{bail, Error, Result};
|
||||
use async_trait::async_trait;
|
||||
use sqlx::{Executor, MySqlPool, Row};
|
||||
@ -60,14 +56,22 @@ impl LNVpsDb for LNVpsDbMysql {
|
||||
|
||||
async fn update_user(&self, user: &User) -> Result<()> {
|
||||
sqlx::query(
|
||||
"update users set email = ?, contact_nip17 = ?, contact_email = ? where id = ?",
|
||||
"update users set email=?, contact_nip17=?, contact_email=?, country_code=?, billing_name=?, billing_address_1=?, billing_address_2=?, billing_city=?, billing_state=?, billing_postcode=?, billing_tax_id=? where id = ?",
|
||||
)
|
||||
.bind(&user.email)
|
||||
.bind(user.contact_nip17)
|
||||
.bind(user.contact_email)
|
||||
.bind(user.id)
|
||||
.execute(&self.db)
|
||||
.await?;
|
||||
.bind(&user.email)
|
||||
.bind(user.contact_nip17)
|
||||
.bind(user.contact_email)
|
||||
.bind(&user.country_code)
|
||||
.bind(&user.billing_name)
|
||||
.bind(&user.billing_address_1)
|
||||
.bind(&user.billing_address_2)
|
||||
.bind(&user.billing_city)
|
||||
.bind(&user.billing_state)
|
||||
.bind(&user.billing_postcode)
|
||||
.bind(&user.billing_tax_id)
|
||||
.bind(user.id)
|
||||
.execute(&self.db)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@ -108,6 +112,13 @@ impl LNVpsDb for LNVpsDbMysql {
|
||||
.map_err(Error::new)
|
||||
}
|
||||
|
||||
async fn list_host_region(&self) -> Result<Vec<VmHostRegion>> {
|
||||
sqlx::query_as("select * from vm_host_region where enabled=1")
|
||||
.fetch_all(&self.db)
|
||||
.await
|
||||
.map_err(Error::new)
|
||||
}
|
||||
|
||||
async fn get_host_region(&self, id: u64) -> Result<VmHostRegion> {
|
||||
sqlx::query_as("select * from vm_host_region where id=?")
|
||||
.bind(id)
|
||||
@ -116,8 +127,16 @@ impl LNVpsDb for LNVpsDbMysql {
|
||||
.map_err(Error::new)
|
||||
}
|
||||
|
||||
async fn get_host_region_by_name(&self, name: &str) -> Result<VmHostRegion> {
|
||||
sqlx::query_as("select * from vm_host_region where name like ?")
|
||||
.bind(name)
|
||||
.fetch_one(&self.db)
|
||||
.await
|
||||
.map_err(Error::new)
|
||||
}
|
||||
|
||||
async fn list_hosts(&self) -> Result<Vec<VmHost>> {
|
||||
sqlx::query_as("select * from vm_host where enabled = 1")
|
||||
sqlx::query_as("select h.* from vm_host h,vm_host_region hr where h.enabled = 1 and h.region_id = hr.id and hr.enabled = 1")
|
||||
.fetch_all(&self.db)
|
||||
.await
|
||||
.map_err(Error::new)
|
||||
@ -158,6 +177,18 @@ impl LNVpsDb for LNVpsDbMysql {
|
||||
.map_err(Error::new)
|
||||
}
|
||||
|
||||
async fn update_host_disk(&self, disk: &VmHostDisk) -> Result<()> {
|
||||
sqlx::query("update vm_host_disk set size=?,kind=?,interface=? where id=?")
|
||||
.bind(disk.size)
|
||||
.bind(disk.kind)
|
||||
.bind(disk.interface)
|
||||
.bind(disk.id)
|
||||
.execute(&self.db)
|
||||
.await
|
||||
.map_err(Error::new)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn get_os_image(&self, id: u64) -> Result<VmOsImage> {
|
||||
sqlx::query_as("select * from vm_os_image where id=?")
|
||||
.bind(id)
|
||||
@ -306,17 +337,18 @@ impl LNVpsDb for LNVpsDbMysql {
|
||||
|
||||
async fn update_vm(&self, vm: &Vm) -> Result<()> {
|
||||
sqlx::query(
|
||||
"update vm set image_id=?,template_id=?,ssh_key_id=?,expires=?,disk_id=? where id=?",
|
||||
"update vm set image_id=?,template_id=?,ssh_key_id=?,expires=?,disk_id=?,mac_address=? where id=?",
|
||||
)
|
||||
.bind(vm.image_id)
|
||||
.bind(vm.template_id)
|
||||
.bind(vm.ssh_key_id)
|
||||
.bind(vm.expires)
|
||||
.bind(vm.disk_id)
|
||||
.bind(vm.id)
|
||||
.execute(&self.db)
|
||||
.await
|
||||
.map_err(Error::new)?;
|
||||
.bind(vm.image_id)
|
||||
.bind(vm.template_id)
|
||||
.bind(vm.ssh_key_id)
|
||||
.bind(vm.expires)
|
||||
.bind(vm.disk_id)
|
||||
.bind(&vm.mac_address)
|
||||
.bind(vm.id)
|
||||
.execute(&self.db)
|
||||
.await
|
||||
.map_err(Error::new)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@ -324,18 +356,18 @@ impl LNVpsDb for LNVpsDbMysql {
|
||||
Ok(sqlx::query(
|
||||
"insert into vm_ip_assignment(vm_id,ip_range_id,ip,arp_ref,dns_forward,dns_forward_ref,dns_reverse,dns_reverse_ref) values(?,?,?,?,?,?,?,?) returning id",
|
||||
)
|
||||
.bind(ip_assignment.vm_id)
|
||||
.bind(ip_assignment.ip_range_id)
|
||||
.bind(&ip_assignment.ip)
|
||||
.bind(&ip_assignment.arp_ref)
|
||||
.bind(&ip_assignment.dns_forward)
|
||||
.bind(&ip_assignment.dns_forward_ref)
|
||||
.bind(&ip_assignment.dns_reverse)
|
||||
.bind(&ip_assignment.dns_reverse_ref)
|
||||
.fetch_one(&self.db)
|
||||
.await
|
||||
.map_err(Error::new)?
|
||||
.try_get(0)?)
|
||||
.bind(ip_assignment.vm_id)
|
||||
.bind(ip_assignment.ip_range_id)
|
||||
.bind(&ip_assignment.ip)
|
||||
.bind(&ip_assignment.arp_ref)
|
||||
.bind(&ip_assignment.dns_forward)
|
||||
.bind(&ip_assignment.dns_forward_ref)
|
||||
.bind(&ip_assignment.dns_reverse)
|
||||
.bind(&ip_assignment.dns_reverse_ref)
|
||||
.fetch_one(&self.db)
|
||||
.await
|
||||
.map_err(Error::new)?
|
||||
.try_get(0)?)
|
||||
}
|
||||
|
||||
async fn update_vm_ip_assignment(&self, ip_assignment: &VmIpAssignment) -> Result<()> {
|
||||
@ -387,16 +419,20 @@ impl LNVpsDb for LNVpsDbMysql {
|
||||
}
|
||||
|
||||
async fn insert_vm_payment(&self, vm_payment: &VmPayment) -> Result<()> {
|
||||
sqlx::query("insert into vm_payment(id,vm_id,created,expires,amount,invoice,time_value,is_paid,rate) values(?,?,?,?,?,?,?,?,?)")
|
||||
sqlx::query("insert into vm_payment(id,vm_id,created,expires,amount,tax,currency,payment_method,time_value,is_paid,rate,external_id,external_data) values(?,?,?,?,?,?,?,?,?,?,?,?,?)")
|
||||
.bind(&vm_payment.id)
|
||||
.bind(vm_payment.vm_id)
|
||||
.bind(vm_payment.created)
|
||||
.bind(vm_payment.expires)
|
||||
.bind(vm_payment.amount)
|
||||
.bind(&vm_payment.invoice)
|
||||
.bind(vm_payment.tax)
|
||||
.bind(&vm_payment.currency)
|
||||
.bind(&vm_payment.payment_method)
|
||||
.bind(vm_payment.time_value)
|
||||
.bind(vm_payment.is_paid)
|
||||
.bind(vm_payment.rate)
|
||||
.bind(&vm_payment.external_id)
|
||||
.bind(&vm_payment.external_data)
|
||||
.execute(&self.db)
|
||||
.await
|
||||
.map_err(Error::new)?;
|
||||
@ -411,6 +447,14 @@ impl LNVpsDb for LNVpsDbMysql {
|
||||
.map_err(Error::new)
|
||||
}
|
||||
|
||||
async fn get_vm_payment_by_ext_id(&self, id: &str) -> Result<VmPayment> {
|
||||
sqlx::query_as("select * from vm_payment where external_id=?")
|
||||
.bind(id)
|
||||
.fetch_one(&self.db)
|
||||
.await
|
||||
.map_err(Error::new)
|
||||
}
|
||||
|
||||
async fn update_vm_payment(&self, vm_payment: &VmPayment) -> Result<()> {
|
||||
sqlx::query("update vm_payment set is_paid = ? where id = ?")
|
||||
.bind(vm_payment.is_paid)
|
||||
@ -428,8 +472,8 @@ impl LNVpsDb for LNVpsDbMysql {
|
||||
|
||||
let mut tx = self.db.begin().await?;
|
||||
|
||||
sqlx::query("update vm_payment set is_paid = true, settle_index = ? where id = ?")
|
||||
.bind(vm_payment.settle_index)
|
||||
sqlx::query("update vm_payment set is_paid = true, external_data = ? where id = ?")
|
||||
.bind(&vm_payment.external_data)
|
||||
.bind(&vm_payment.id)
|
||||
.execute(&mut *tx)
|
||||
.await?;
|
||||
@ -446,7 +490,7 @@ impl LNVpsDb for LNVpsDbMysql {
|
||||
|
||||
async fn last_paid_invoice(&self) -> Result<Option<VmPayment>> {
|
||||
sqlx::query_as(
|
||||
"select * from vm_payment where is_paid = true order by settle_index desc limit 1",
|
||||
"select * from vm_payment where is_paid = true order by created desc limit 1",
|
||||
)
|
||||
.fetch_optional(&self.db)
|
||||
.await
|
||||
@ -498,4 +542,140 @@ impl LNVpsDb for LNVpsDbMysql {
|
||||
.await
|
||||
.map_err(Error::new)
|
||||
}
|
||||
|
||||
async fn get_router(&self, router_id: u64) -> Result<Router> {
|
||||
sqlx::query_as("select * from router where id=?")
|
||||
.bind(router_id)
|
||||
.fetch_one(&self.db)
|
||||
.await
|
||||
.map_err(Error::new)
|
||||
}
|
||||
|
||||
async fn get_access_policy(&self, access_policy_id: u64) -> Result<AccessPolicy> {
|
||||
sqlx::query_as("select * from access_policy where id=?")
|
||||
.bind(access_policy_id)
|
||||
.fetch_one(&self.db)
|
||||
.await
|
||||
.map_err(Error::new)
|
||||
}
|
||||
|
||||
async fn get_company(&self, company_id: u64) -> Result<Company> {
|
||||
sqlx::query_as("select * from company where id=?")
|
||||
.bind(company_id)
|
||||
.fetch_one(&self.db)
|
||||
.await
|
||||
.map_err(Error::new)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "nostr-domain")]
|
||||
#[async_trait]
|
||||
impl LNVPSNostrDb for LNVpsDbMysql {
|
||||
async fn get_handle(&self, handle_id: u64) -> Result<NostrDomainHandle> {
|
||||
sqlx::query_as("select * from nostr_domain_handle where id=?")
|
||||
.bind(handle_id)
|
||||
.fetch_one(&self.db)
|
||||
.await
|
||||
.map_err(Error::new)
|
||||
}
|
||||
|
||||
async fn get_handle_by_name(&self, domain_id: u64, handle: &str) -> Result<NostrDomainHandle> {
|
||||
sqlx::query_as("select * from nostr_domain_handle where domain_id=? and handle=?")
|
||||
.bind(domain_id)
|
||||
.bind(handle)
|
||||
.fetch_one(&self.db)
|
||||
.await
|
||||
.map_err(Error::new)
|
||||
}
|
||||
|
||||
async fn insert_handle(&self, handle: &NostrDomainHandle) -> Result<u64> {
|
||||
Ok(
|
||||
sqlx::query(
|
||||
"insert into nostr_domain_handle(domain_id,handle,pubkey,relays) values(?,?,?,?) returning id",
|
||||
)
|
||||
.bind(handle.domain_id)
|
||||
.bind(&handle.handle)
|
||||
.bind(&handle.pubkey)
|
||||
.bind(&handle.relays)
|
||||
.fetch_one(&self.db)
|
||||
.await
|
||||
.map_err(Error::new)?
|
||||
.try_get(0)?,
|
||||
)
|
||||
}
|
||||
|
||||
async fn update_handle(&self, handle: &NostrDomainHandle) -> Result<()> {
|
||||
sqlx::query("update nostr_domain_handle set handle=?,pubkey=?,relays=? where id=?")
|
||||
.bind(&handle.handle)
|
||||
.bind(&handle.pubkey)
|
||||
.bind(&handle.relays)
|
||||
.bind(handle.id)
|
||||
.execute(&self.db)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn delete_handle(&self, handle_id: u64) -> Result<()> {
|
||||
sqlx::query("delete from nostr_domain_handle where id=?")
|
||||
.bind(handle_id)
|
||||
.execute(&self.db)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn list_handles(&self, domain_id: u64) -> Result<Vec<NostrDomainHandle>> {
|
||||
sqlx::query_as("select * from nostr_domain_handle where domain_id=?")
|
||||
.bind(domain_id)
|
||||
.fetch_all(&self.db)
|
||||
.await
|
||||
.map_err(Error::new)
|
||||
}
|
||||
|
||||
async fn get_domain(&self, id: u64) -> Result<NostrDomain> {
|
||||
sqlx::query_as("select *,(select count(1) from nostr_domain_handle where domain_id=nostr_domain.id) handles from nostr_domain where id=?")
|
||||
.bind(id)
|
||||
.fetch_one(&self.db)
|
||||
.await
|
||||
.map_err(Error::new)
|
||||
}
|
||||
|
||||
async fn get_domain_by_name(&self, name: &str) -> Result<NostrDomain> {
|
||||
sqlx::query_as("select *,(select count(1) from nostr_domain_handle where domain_id=nostr_domain.id) handles from nostr_domain where name=?")
|
||||
.bind(name)
|
||||
.fetch_one(&self.db)
|
||||
.await
|
||||
.map_err(Error::new)
|
||||
}
|
||||
|
||||
async fn list_domains(&self, owner_id: u64) -> Result<Vec<NostrDomain>> {
|
||||
sqlx::query_as("select *,(select count(1) from nostr_domain_handle where domain_id=nostr_domain.id) handles from nostr_domain where owner_id=?")
|
||||
.bind(owner_id)
|
||||
.fetch_all(&self.db)
|
||||
.await
|
||||
.map_err(Error::new)
|
||||
}
|
||||
|
||||
async fn insert_domain(&self, domain: &NostrDomain) -> Result<u64> {
|
||||
Ok(
|
||||
sqlx::query(
|
||||
"insert into nostr_domain(owner_id,name,relays) values(?,?,?) returning id",
|
||||
)
|
||||
.bind(domain.owner_id)
|
||||
.bind(&domain.name)
|
||||
.bind(&domain.relays)
|
||||
.fetch_one(&self.db)
|
||||
.await
|
||||
.map_err(Error::new)?
|
||||
.try_get(0)?,
|
||||
)
|
||||
}
|
||||
|
||||
async fn delete_domain(&self, domain_id: u64) -> Result<()> {
|
||||
sqlx::query("update nostr_domain set deleted = current_timestamp where id = ?")
|
||||
.bind(domain_id)
|
||||
.execute(&self.db)
|
||||
.await
|
||||
.map_err(Error::new)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
7
lnvps_nostr/Cargo.lock
generated
Normal file
@ -0,0 +1,7 @@
|
||||
# This file is automatically @generated by Cargo.
|
||||
# It is not intended for manual editing.
|
||||
version = 4
|
||||
|
||||
[[package]]
|
||||
name = "lnvps_nostr"
|
||||
version = "0.1.0"
|
17
lnvps_nostr/Cargo.toml
Normal file
@ -0,0 +1,17 @@
|
||||
[package]
|
||||
name = "lnvps_nostr"
|
||||
version = "0.1.0"
|
||||
edition = "2024"
|
||||
|
||||
[dependencies]
|
||||
lnvps_db = { path = "../lnvps_db", features = ["nostr-domain"] }
|
||||
lnvps_common = { path = "../lnvps_common" }
|
||||
env_logger.workspace = true
|
||||
log.workspace = true
|
||||
anyhow.workspace = true
|
||||
tokio.workspace = true
|
||||
serde.workspace = true
|
||||
config.workspace = true
|
||||
serde_json.workspace = true
|
||||
rocket.workspace = true
|
||||
hex.workspace = true
|
3
lnvps_nostr/README.md
Normal file
@ -0,0 +1,3 @@
|
||||
# LNVPS Nostr Services
|
||||
|
||||
A simple web server hosting various Nostr-based services for lnvps.net.
|
5
lnvps_nostr/config.yaml
Normal file
@ -0,0 +1,5 @@
|
||||
# Connection string to lnvps database
|
||||
db: "mysql://root:root@localhost:3376/lnvps"
|
||||
|
||||
# Listen address for http server
|
||||
listen: "127.0.0.1:8001"
|
47
lnvps_nostr/index.html
Normal file
@ -0,0 +1,47 @@
|
||||
<!doctype html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<title>LNVPS</title>
|
||||
<meta charset="UTF-8"/>
|
||||
<link rel="preconnect" href="https://fonts.googleapis.com"/>
|
||||
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin/>
|
||||
<link
|
||||
href="https://fonts.googleapis.com/css2?family=Source+Code+Pro:ital,wght@0,200..900;1,200..900&display=swap"
|
||||
rel="stylesheet"
|
||||
/>
|
||||
<style>
|
||||
html, body {
|
||||
margin: 0;
|
||||
font-size: 15px;
|
||||
font-family: "Source Code Pro", monospace;
|
||||
color: white;
|
||||
background-color: black;
|
||||
}
|
||||
|
||||
.page {
|
||||
margin-left: 4rem;
|
||||
margin-right: 4rem;
|
||||
}
|
||||
|
||||
.header {
|
||||
display: flex;
|
||||
gap: 2rem;
|
||||
align-items: center;
|
||||
justify-content: space-between;
|
||||
font-size: 3rem;
|
||||
margin: 2rem 0;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div class="page">
|
||||
<div class="header">
|
||||
LNVPS
|
||||
<img height="48" width="48" src="https://lnvps.net/logo.jpg" alt="logo"/>
|
||||
</div>
|
||||
<hr/>
|
||||
<p>This domain uses LNVPS' free NIP-05 hosting; get your own at <a href="https://lnvps.net">LNVPS.net</a></p>
|
||||
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
65
lnvps_nostr/src/main.rs
Normal file
@ -0,0 +1,65 @@
|
||||
mod routes;
|
||||
|
||||
use crate::routes::routes;
|
||||
use anyhow::Result;
|
||||
use config::{Config, File};
|
||||
use lnvps_common::CORS;
|
||||
use lnvps_db::{LNVPSNostrDb, LNVpsDbMysql};
|
||||
use log::error;
|
||||
use rocket::http::Method;
|
||||
use serde::Deserialize;
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
|
||||
#[derive(Clone, Deserialize)]
|
||||
struct Settings {
|
||||
/// Database connection string
|
||||
db: String,
|
||||
/// Listen address for http server
|
||||
listen: Option<String>,
|
||||
}
|
||||
|
||||
#[rocket::main]
|
||||
async fn main() -> Result<()> {
|
||||
env_logger::init();
|
||||
|
||||
let settings: Settings = Config::builder()
|
||||
.add_source(File::from(PathBuf::from("config.yaml")))
|
||||
.build()?
|
||||
.try_deserialize()?;
|
||||
|
||||
// Connect database
|
||||
let db = LNVpsDbMysql::new(&settings.db).await?;
|
||||
let db: Arc<dyn LNVPSNostrDb> = Arc::new(db);
|
||||
|
||||
let mut config = rocket::Config::default();
|
||||
let ip: SocketAddr = match &settings.listen {
|
||||
Some(i) => i.parse()?,
|
||||
None => SocketAddr::new(IpAddr::from([0, 0, 0, 0]), 8000),
|
||||
};
|
||||
config.address = ip.ip();
|
||||
config.port = ip.port();
|
||||
|
||||
if let Err(e) = rocket::Rocket::custom(config)
|
||||
.manage(db.clone())
|
||||
.manage(settings.clone())
|
||||
.attach(CORS)
|
||||
.mount("/", routes())
|
||||
.mount(
|
||||
"/",
|
||||
vec![rocket::Route::ranked(
|
||||
isize::MAX,
|
||||
Method::Options,
|
||||
"/<catch_all_options_route..>",
|
||||
CORS,
|
||||
)],
|
||||
)
|
||||
.launch()
|
||||
.await
|
||||
{
|
||||
error!("{}", e);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
72
lnvps_nostr/src/routes.rs
Normal file
@ -0,0 +1,72 @@
|
||||
use lnvps_db::LNVPSNostrDb;
|
||||
use log::info;
|
||||
use rocket::request::{FromRequest, Outcome};
|
||||
use rocket::serde::json::Json;
|
||||
use rocket::{Request, Route, State, routes};
|
||||
use serde::Serialize;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use rocket::http::ContentType;
|
||||
|
||||
pub fn routes() -> Vec<Route> {
|
||||
routes![get_index, nostr_address]
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
struct NostrJson {
|
||||
pub names: HashMap<String, String>,
|
||||
pub relays: HashMap<String, Vec<String>>,
|
||||
}
|
||||
|
||||
struct HostInfo<'r> {
|
||||
pub host: Option<&'r str>,
|
||||
}
|
||||
|
||||
#[rocket::async_trait]
|
||||
impl<'r> FromRequest<'r> for HostInfo<'r> {
|
||||
type Error = ();
|
||||
|
||||
async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
|
||||
Outcome::Success(HostInfo {
|
||||
host: request.host().map(|h| h.domain().as_str()),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[rocket::get("/", format = "html")]
|
||||
fn get_index() -> (ContentType, &'static str) {
|
||||
const HTML: &str = include_str!("../index.html");
|
||||
(ContentType::HTML, HTML)
|
||||
}
|
||||
|
||||
#[rocket::get("/.well-known/nostr.json?<name>")]
|
||||
async fn nostr_address(
|
||||
host: HostInfo<'_>,
|
||||
db: &State<Arc<dyn LNVPSNostrDb>>,
|
||||
name: Option<&str>,
|
||||
) -> Result<Json<NostrJson>, &'static str> {
|
||||
let name = name.unwrap_or("_");
|
||||
let host = host.host.unwrap_or("lnvps.net");
|
||||
info!("Got request for {} on host {}", name, host);
|
||||
let domain = db
|
||||
.get_domain_by_name(host)
|
||||
.await
|
||||
.map_err(|_| "Domain not found")?;
|
||||
let handle = db
|
||||
.get_handle_by_name(domain.id, name)
|
||||
.await
|
||||
.map_err(|_| "Handle not found")?;
|
||||
|
||||
let pubkey_hex = hex::encode(handle.pubkey);
|
||||
let relays = if let Some(r) = handle.relays {
|
||||
r.split(",").map(|x| x.to_string()).collect()
|
||||
} else if let Some(r) = domain.relays {
|
||||
r.split(",").map(|x| x.to_string()).collect()
|
||||
} else {
|
||||
vec![]
|
||||
};
|
||||
Ok(Json(NostrJson {
|
||||
names: HashMap::from([(name.to_string(), pubkey_hex.clone())]),
|
||||
relays: HashMap::from([(pubkey_hex, relays)]),
|
||||
}))
|
||||
}
|
@ -1,48 +0,0 @@
|
||||
use crate::host::{FullVmInfo, TimeSeries, TimeSeriesData, VmHostClient};
|
||||
use crate::status::VmState;
|
||||
use lnvps_db::{async_trait, Vm, VmOsImage};
|
||||
|
||||
pub struct LibVirt {}
|
||||
|
||||
#[async_trait]
|
||||
impl VmHostClient for LibVirt {
|
||||
async fn download_os_image(&self, image: &VmOsImage) -> anyhow::Result<()> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn generate_mac(&self, vm: &Vm) -> anyhow::Result<String> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn start_vm(&self, vm: &Vm) -> anyhow::Result<()> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn stop_vm(&self, vm: &Vm) -> anyhow::Result<()> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn reset_vm(&self, vm: &Vm) -> anyhow::Result<()> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn create_vm(&self, cfg: &FullVmInfo) -> anyhow::Result<()> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_vm_state(&self, vm: &Vm) -> anyhow::Result<VmState> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn configure_vm(&self, vm: &Vm) -> anyhow::Result<()> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_time_series_data(
|
||||
&self,
|
||||
vm: &Vm,
|
||||
series: TimeSeries,
|
||||
) -> anyhow::Result<Vec<TimeSeriesData>> {
|
||||
todo!()
|
||||
}
|
||||
}
|
188
src/host/mod.rs
@ -1,188 +0,0 @@
|
||||
use crate::settings::ProvisionerConfig;
|
||||
use crate::status::VmState;
|
||||
use anyhow::{bail, Result};
|
||||
use futures::future::join_all;
|
||||
use lnvps_db::{
|
||||
async_trait, IpRange, LNVpsDb, UserSshKey, Vm, VmCustomTemplate, VmHost, VmHostDisk,
|
||||
VmHostKind, VmIpAssignment, VmOsImage, VmTemplate,
|
||||
};
|
||||
use schemars::JsonSchema;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashSet;
|
||||
use std::sync::Arc;
|
||||
|
||||
#[cfg(feature = "libvirt")]
|
||||
mod libvirt;
|
||||
#[cfg(feature = "proxmox")]
|
||||
mod proxmox;
|
||||
|
||||
/// Generic interface for creating and managing VMs on a host
|
||||
#[async_trait]
|
||||
pub trait VmHostClient: Send + Sync {
|
||||
/// Download OS image to the host
|
||||
async fn download_os_image(&self, image: &VmOsImage) -> Result<()>;
|
||||
|
||||
/// Create a random MAC address for the NIC
|
||||
async fn generate_mac(&self, vm: &Vm) -> Result<String>;
|
||||
|
||||
/// Start a VM
|
||||
async fn start_vm(&self, vm: &Vm) -> Result<()>;
|
||||
|
||||
/// Stop a VM
|
||||
async fn stop_vm(&self, vm: &Vm) -> Result<()>;
|
||||
|
||||
/// Reset VM (Hard)
|
||||
async fn reset_vm(&self, vm: &Vm) -> Result<()>;
|
||||
|
||||
/// Spawn a VM
|
||||
async fn create_vm(&self, cfg: &FullVmInfo) -> Result<()>;
|
||||
|
||||
/// Get the running status of a VM
|
||||
async fn get_vm_state(&self, vm: &Vm) -> Result<VmState>;
|
||||
|
||||
/// Apply vm configuration (patch)
|
||||
async fn configure_vm(&self, cfg: &FullVmInfo) -> Result<()>;
|
||||
|
||||
/// Get resource usage data
|
||||
async fn get_time_series_data(
|
||||
&self,
|
||||
vm: &Vm,
|
||||
series: TimeSeries,
|
||||
) -> Result<Vec<TimeSeriesData>>;
|
||||
}
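A short usage sketch of this trait based only on the signatures shown here (error handling elided; the crate's real call sites may differ), using the get_host_client helper defined just below.
// Sketch only: create and boot a VM through whichever host client the config selects.
async fn spawn_vm(
    db: std::sync::Arc<dyn lnvps_db::LNVpsDb>,
    host: &lnvps_db::VmHost,
    cfg: &ProvisionerConfig,
    vm_id: u64,
) -> anyhow::Result<()> {
    let client = get_host_client(host, cfg)?;      // proxmox, libvirt or mock
    let info = FullVmInfo::load(vm_id, db).await?; // template, disk, ips, ssh key
    client.create_vm(&info).await?;                // define the VM on the hypervisor
    client.start_vm(&info.vm).await?;              // boot it
    Ok(())
}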
|
||||
|
||||
pub fn get_host_client(host: &VmHost, cfg: &ProvisionerConfig) -> Result<Arc<dyn VmHostClient>> {
|
||||
#[cfg(test)]
|
||||
{
|
||||
Ok(Arc::new(crate::mocks::MockVmHost::new()))
|
||||
}
|
||||
#[cfg(not(test))]
|
||||
{
|
||||
Ok(match (host.kind.clone(), &cfg) {
|
||||
#[cfg(feature = "proxmox")]
|
||||
(
|
||||
VmHostKind::Proxmox,
|
||||
ProvisionerConfig::Proxmox {
|
||||
qemu,
|
||||
ssh,
|
||||
mac_prefix,
|
||||
},
|
||||
) => Arc::new(proxmox::ProxmoxClient::new(
|
||||
host.ip.parse()?,
|
||||
&host.name,
|
||||
&host.api_token,
|
||||
mac_prefix.clone(),
|
||||
qemu.clone(),
|
||||
ssh.clone(),
|
||||
)),
|
||||
_ => bail!("Unknown host config: {}", host.kind),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// All VM info necessary to provision a VM and its associated resources
|
||||
pub struct FullVmInfo {
|
||||
/// Instance to create
|
||||
pub vm: Vm,
|
||||
/// Disk where this VM will be saved on the host
|
||||
pub disk: VmHostDisk,
|
||||
/// VM template resources
|
||||
pub template: Option<VmTemplate>,
|
||||
/// VM custom template resources
|
||||
pub custom_template: Option<VmCustomTemplate>,
|
||||
/// The OS image used to create the VM
|
||||
pub image: VmOsImage,
|
||||
/// List of IP resources assigned to this VM
|
||||
pub ips: Vec<VmIpAssignment>,
|
||||
/// Ranges associated with [ips]
|
||||
pub ranges: Vec<IpRange>,
|
||||
/// SSH key to access the VM
|
||||
pub ssh_key: UserSshKey,
|
||||
}
|
||||
|
||||
impl FullVmInfo {
|
||||
pub async fn load(vm_id: u64, db: Arc<dyn LNVpsDb>) -> Result<Self> {
|
||||
let vm = db.get_vm(vm_id).await?;
|
||||
let image = db.get_os_image(vm.image_id).await?;
|
||||
let disk = db.get_host_disk(vm.disk_id).await?;
|
||||
let ssh_key = db.get_user_ssh_key(vm.ssh_key_id).await?;
|
||||
let ips = db.list_vm_ip_assignments(vm_id).await?;
|
||||
|
||||
let ip_range_ids: HashSet<u64> = ips.iter().map(|i| i.ip_range_id).collect();
|
||||
let ip_ranges: Vec<_> = ip_range_ids.iter().map(|i| db.get_ip_range(*i)).collect();
|
||||
let ranges: Vec<IpRange> = join_all(ip_ranges)
|
||||
.await
|
||||
.into_iter()
|
||||
.filter_map(Result::ok)
|
||||
.collect();
|
||||
|
||||
let template = if let Some(t) = vm.template_id {
|
||||
Some(db.get_vm_template(t).await?)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let custom_template = if let Some(t) = vm.custom_template_id {
|
||||
Some(db.get_custom_vm_template(t).await?)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
// create VM
|
||||
Ok(FullVmInfo {
|
||||
vm,
|
||||
template,
|
||||
custom_template,
|
||||
image,
|
||||
ips,
|
||||
disk,
|
||||
ranges,
|
||||
ssh_key,
|
||||
})
|
||||
}
|
||||
|
||||
/// Resolve the VM's resources (CPU, memory, disk) from its standard or custom template
|
||||
pub fn resources(&self) -> Result<VmResources> {
|
||||
if let Some(t) = &self.template {
|
||||
Ok(VmResources {
|
||||
cpu: t.cpu,
|
||||
memory: t.memory,
|
||||
disk_size: t.disk_size,
|
||||
})
|
||||
} else if let Some(t) = &self.custom_template {
|
||||
Ok(VmResources {
|
||||
cpu: t.cpu,
|
||||
memory: t.memory,
|
||||
disk_size: t.disk_size,
|
||||
})
|
||||
} else {
|
||||
bail!("Invalid VM config, no template");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct VmResources {
|
||||
pub cpu: u16,
|
||||
pub memory: u64,
|
||||
pub disk_size: u64,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
|
||||
pub struct TimeSeriesData {
|
||||
pub timestamp: u64,
|
||||
pub cpu: f32,
|
||||
pub memory: f32,
|
||||
pub memory_size: u64,
|
||||
pub net_in: f32,
|
||||
pub net_out: f32,
|
||||
pub disk_write: f32,
|
||||
pub disk_read: f32,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum TimeSeries {
|
||||
Hourly,
|
||||
Daily,
|
||||
Weekly,
|
||||
Monthly,
|
||||
Yearly,
|
||||
}
|
@ -1,64 +0,0 @@
use crate::lightning::{InvoiceUpdate, LightningNode};
use crate::worker::WorkJob;
use anyhow::Result;
use lnvps_db::LNVpsDb;
use log::{error, info, warn};
use nostr::util::hex;
use rocket::futures::StreamExt;
use std::sync::Arc;
use tokio::sync::mpsc::UnboundedSender;

pub struct InvoiceHandler {
    node: Arc<dyn LightningNode>,
    db: Arc<dyn LNVpsDb>,
    tx: UnboundedSender<WorkJob>,
}

impl InvoiceHandler {
    pub fn new(
        node: Arc<dyn LightningNode>,
        db: Arc<dyn LNVpsDb>,
        tx: UnboundedSender<WorkJob>,
    ) -> Self {
        Self { node, tx, db }
    }

    async fn mark_paid(&self, settle_index: u64, id: &Vec<u8>) -> Result<()> {
        let mut p = self.db.get_vm_payment(id).await?;
        p.settle_index = Some(settle_index);
        self.db.vm_payment_paid(&p).await?;

        info!("VM payment {} for {}, paid", hex::encode(p.id), p.vm_id);
        self.tx.send(WorkJob::CheckVm { vm_id: p.vm_id })?;

        Ok(())
    }

    pub async fn listen(&mut self) -> Result<()> {
        let from_ph = self.db.last_paid_invoice().await?.map(|i| i.id.clone());
        info!(
            "Listening for invoices from {}",
            from_ph
                .as_ref()
                .map(hex::encode)
                .unwrap_or("NOW".to_string())
        );

        let mut handler = self.node.subscribe_invoices(from_ph).await?;
        while let Some(msg) = handler.next().await {
            match msg {
                InvoiceUpdate::Settled {
                    payment_hash,
                    settle_index,
                } => {
                    let r_hash = hex::decode(payment_hash)?;
                    if let Err(e) = self.mark_paid(settle_index, &r_hash).await {
                        error!("{}", e);
                    }
                }
                v => warn!("Unknown invoice update: {:?}", v),
            }
        }
        Ok(())
    }
}
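A minimal wiring sketch, assuming `node: Arc<dyn LightningNode>` and `db: Arc<dyn LNVpsDb>` are already constructed and that a separate worker loop consumes the returned receiver; the helper name is hypothetical.

// Hypothetical setup: spawn the invoice listener and hand back the WorkJob receiver.
fn spawn_invoice_listener(
    node: Arc<dyn LightningNode>,
    db: Arc<dyn LNVpsDb>,
) -> tokio::sync::mpsc::UnboundedReceiver<WorkJob> {
    let (tx, rx) = tokio::sync::mpsc::unbounded_channel::<WorkJob>();
    let mut handler = InvoiceHandler::new(node, db, tx);
    tokio::spawn(async move {
        if let Err(e) = handler.listen().await {
            log::error!("invoice listener stopped: {}", e);
        }
    });
    rx
}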
@ -1,73 +0,0 @@
use anyhow::bail;
use log::debug;
use reqwest::header::{HeaderMap, AUTHORIZATION};
use reqwest::{Client, Method, Url};
use serde::de::DeserializeOwned;
use serde::Serialize;

pub struct JsonApi {
    pub client: Client,
    pub base: Url,
}

impl JsonApi {
    pub fn token(base: &str, token: &str) -> anyhow::Result<Self> {
        let mut headers = HeaderMap::new();
        headers.insert(AUTHORIZATION, token.parse()?);

        let client = Client::builder().default_headers(headers).build()?;
        Ok(Self {
            client,
            base: base.parse()?,
        })
    }

    pub async fn get<T: DeserializeOwned>(&self, path: &str) -> anyhow::Result<T> {
        debug!(">> GET {}", path);
        let rsp = self.client.get(self.base.join(path)?).send().await?;
        let status = rsp.status();
        let text = rsp.text().await?;
        #[cfg(debug_assertions)]
        debug!("<< {}", text);
        if status.is_success() {
            Ok(serde_json::from_str(&text)?)
        } else {
            bail!("{}", status);
        }
    }

    pub async fn post<T: DeserializeOwned, R: Serialize>(
        &self,
        path: &str,
        body: R,
    ) -> anyhow::Result<T> {
        self.req(Method::POST, path, body).await
    }

    pub async fn req<T: DeserializeOwned, R: Serialize>(
        &self,
        method: Method,
        path: &str,
        body: R,
    ) -> anyhow::Result<T> {
        let body = serde_json::to_string(&body)?;
        debug!(">> {} {}: {}", method.clone(), path, &body);
        let rsp = self
            .client
            .request(method.clone(), self.base.join(path)?)
            .header("Content-Type", "application/json")
            .header("Accept", "application/json")
            .body(body)
            .send()
            .await?;
        let status = rsp.status();
        let text = rsp.text().await?;
        #[cfg(debug_assertions)]
        debug!("<< {}", text);
        if status.is_success() {
            Ok(serde_json::from_str(&text)?)
        } else {
            bail!("{} {}: {}: {}", method, path, status, &text);
        }
    }
}
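A minimal usage sketch of JsonApi, assuming a JSON endpoint that accepts a bearer token in the Authorization header; the URL, path, and `StatusInfo` response type are placeholders.

// Hypothetical caller: fetch and deserialize a JSON response.
#[derive(serde::Deserialize)]
struct StatusInfo {
    version: String,
}

async fn fetch_status() -> anyhow::Result<StatusInfo> {
    let api = JsonApi::token("https://example.com/", "Bearer my-token")?;
    api.get::<StatusInfo>("/api/v1/status").await
}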
19 src/lib.rs
@ -1,19 +0,0 @@
pub mod api;
pub mod cors;
pub mod dns;
pub mod exchange;
pub mod host;
pub mod invoice;
pub mod json_api;
pub mod lightning;
pub mod nip98;
pub mod provisioner;
pub mod router;
pub mod settings;
#[cfg(feature = "proxmox")]
pub mod ssh_client;
pub mod status;
pub mod worker;

#[cfg(test)]
pub mod mocks;
@ -1,82 +0,0 @@
use crate::api::WEBHOOK_BRIDGE;
use crate::json_api::JsonApi;
use crate::lightning::{AddInvoiceRequest, AddInvoiceResult, InvoiceUpdate, LightningNode};
use anyhow::bail;
use futures::{Stream, StreamExt};
use lnvps_db::async_trait;
use serde::{Deserialize, Serialize};
use std::pin::Pin;
use tokio_stream::wrappers::BroadcastStream;

pub struct BitvoraNode {
    api: JsonApi,
    webhook_secret: String,
}

impl BitvoraNode {
    pub fn new(api_token: &str, webhook_secret: &str) -> Self {
        let auth = format!("Bearer {}", api_token);
        Self {
            api: JsonApi::token("https://api.bitvora.com/", &auth).unwrap(),
            webhook_secret: webhook_secret.to_string(),
        }
    }
}

#[async_trait]
impl LightningNode for BitvoraNode {
    async fn add_invoice(&self, req: AddInvoiceRequest) -> anyhow::Result<AddInvoiceResult> {
        let req = CreateInvoiceRequest {
            amount: req.amount / 1000,
            currency: "sats".to_string(),
            description: req.memo.unwrap_or_default(),
            expiry_seconds: req.expire.unwrap_or(3600) as u64,
        };
        let rsp: BitvoraResponse<CreateInvoiceResponse> = self
            .api
            .post("/v1/bitcoin/deposit/lightning-invoice", req)
            .await?;
        if rsp.status >= 400 {
            bail!(
                "API error: {} {}",
                rsp.status,
                rsp.message.unwrap_or_default()
            );
        }
        Ok(AddInvoiceResult {
            pr: rsp.data.payment_request,
            payment_hash: rsp.data.r_hash,
        })
    }

    async fn subscribe_invoices(
        &self,
        _from_payment_hash: Option<Vec<u8>>,
    ) -> anyhow::Result<Pin<Box<dyn Stream<Item = InvoiceUpdate> + Send>>> {
        let rx = BroadcastStream::new(WEBHOOK_BRIDGE.listen());
        let mapped = rx.then(|r| async move { InvoiceUpdate::Unknown });
        Ok(Box::pin(mapped))
    }
}

#[derive(Debug, Clone, Serialize, Deserialize)]
struct CreateInvoiceRequest {
    pub amount: u64,
    pub currency: String,
    pub description: String,
    pub expiry_seconds: u64,
}

#[derive(Debug, Clone, Deserialize)]
struct BitvoraResponse<T> {
    pub status: isize,
    pub message: Option<String>,
    pub data: T,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
struct CreateInvoiceResponse {
    pub id: String,
    pub r_hash: String,
    pub payment_request: String,
}
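A minimal smoke-test sketch, assuming the API token and webhook secret come from configuration and that AddInvoiceRequest has exactly the fields used elsewhere in this diff (memo, amount in msats, expire in seconds); the function name is hypothetical.

// Hypothetical caller: create a small test invoice against the Bitvora backend.
async fn create_test_invoice() -> anyhow::Result<()> {
    let node = BitvoraNode::new("bitvora-api-token", "webhook-secret");
    let invoice = node
        .add_invoice(AddInvoiceRequest {
            memo: Some("test invoice".to_string()),
            amount: 50_000, // msats
            expire: Some(600),
        })
        .await?;
    println!("pr: {} hash: {}", invoice.pr, invoice.payment_hash);
    Ok(())
}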
@ -1,603 +0,0 @@
use crate::dns::{BasicRecord, DnsServer};
use crate::exchange::{ExchangeRateService, Ticker};
use crate::host::{get_host_client, FullVmInfo};
use crate::lightning::{AddInvoiceRequest, LightningNode};
use crate::provisioner::{
    CostResult, HostCapacityService, NetworkProvisioner, PricingEngine, ProvisionerMethod,
};
use crate::router::{ArpEntry, Router};
use crate::settings::{NetworkAccessPolicy, NetworkPolicy, ProvisionerConfig, Settings};
use anyhow::{bail, ensure, Context, Result};
use chrono::{Days, Months, Utc};
use lnvps_db::{DiskType, LNVpsDb, Vm, VmCostPlanIntervalType, VmCustomTemplate, VmIpAssignment, VmPayment};
use log::{info, warn};
use nostr::util::hex;
use std::ops::Add;
use std::sync::Arc;
use std::time::Duration;

/// Main provisioner class for LNVPS
///
/// Does all the hard work and logic for creating / expiring VM's
pub struct LNVpsProvisioner {
    read_only: bool,

    db: Arc<dyn LNVpsDb>,
    node: Arc<dyn LightningNode>,
    rates: Arc<dyn ExchangeRateService>,

    router: Option<Arc<dyn Router>>,
    dns: Option<Arc<dyn DnsServer>>,

    network_policy: NetworkPolicy,
    provisioner_config: ProvisionerConfig,
}

impl LNVpsProvisioner {
    pub fn new(
        settings: Settings,
        db: Arc<dyn LNVpsDb>,
        node: Arc<dyn LightningNode>,
        rates: Arc<dyn ExchangeRateService>,
    ) -> Self {
        Self {
            db,
            node,
            rates,
            router: settings.get_router().expect("router config"),
            dns: settings.get_dns().expect("dns config"),
            network_policy: settings.network_policy,
            provisioner_config: settings.provisioner,
            read_only: settings.read_only,
        }
    }

    /// Create or Update access policy for a given ip assignment, does not save to database!
    pub async fn update_access_policy(&self, assignment: &mut VmIpAssignment) -> Result<()> {
        // apply network policy
        if let NetworkAccessPolicy::StaticArp { interface } = &self.network_policy.access {
            if let Some(r) = self.router.as_ref() {
                let vm = self.db.get_vm(assignment.vm_id).await?;
                let entry = ArpEntry::new(&vm, assignment, Some(interface.clone()))?;
                let arp = if let Some(_id) = &assignment.arp_ref {
                    r.update_arp_entry(&entry).await?
                } else {
                    r.add_arp_entry(&entry).await?
                };
                ensure!(arp.id.is_some(), "ARP id was empty");
                assignment.arp_ref = arp.id;
            } else {
                bail!("No router found to apply static arp entry!")
            }
        }
        Ok(())
    }

    /// Remove an access policy for a given ip assignment, does not save to database!
    pub async fn remove_access_policy(&self, assignment: &mut VmIpAssignment) -> Result<()> {
        // Delete access policy
        if let NetworkAccessPolicy::StaticArp { .. } = &self.network_policy.access {
            if let Some(r) = self.router.as_ref() {
                let id = if let Some(id) = &assignment.arp_ref {
                    Some(id.clone())
                } else {
                    warn!("ARP REF not found, using arp list");

                    let ent = r.list_arp_entry().await?;
                    if let Some(ent) = ent.iter().find(|e| e.address == assignment.ip) {
                        ent.id.clone()
                    } else {
                        warn!("ARP entry not found, skipping");
                        None
                    }
                };

                if let Some(id) = id {
                    if let Err(e) = r.remove_arp_entry(&id).await {
                        warn!("Failed to remove arp entry, skipping: {}", e);
                    }
                }
                assignment.arp_ref = None;
            }
        }
        Ok(())
    }

    /// Delete DNS on the dns server, does not save to database!
    pub async fn remove_ip_dns(&self, assignment: &mut VmIpAssignment) -> Result<()> {
        // Delete forward/reverse dns
        if let Some(dns) = &self.dns {
            if let Some(_r) = &assignment.dns_reverse_ref {
                let rev = BasicRecord::reverse(assignment)?;
                if let Err(e) = dns.delete_record(&rev).await {
                    warn!("Failed to delete reverse record: {}", e);
                }
                assignment.dns_reverse_ref = None;
                assignment.dns_reverse = None;
            }
            if let Some(_r) = &assignment.dns_forward_ref {
                let rev = BasicRecord::forward(assignment)?;
                if let Err(e) = dns.delete_record(&rev).await {
                    warn!("Failed to delete forward record: {}", e);
                }
                assignment.dns_forward_ref = None;
                assignment.dns_forward = None;
            }
        }
        Ok(())
    }

    /// Update DNS on the dns server, does not save to database!
    pub async fn update_forward_ip_dns(&self, assignment: &mut VmIpAssignment) -> Result<()> {
        if let Some(dns) = &self.dns {
            let fwd = BasicRecord::forward(assignment)?;
            let ret_fwd = if fwd.id.is_some() {
                dns.update_record(&fwd).await?
            } else {
                dns.add_record(&fwd).await?
            };
            assignment.dns_forward = Some(ret_fwd.name);
            assignment.dns_forward_ref = Some(ret_fwd.id.context("Record id is missing")?);
        }
        Ok(())
    }

    /// Update DNS on the dns server, does not save to database!
    pub async fn update_reverse_ip_dns(&self, assignment: &mut VmIpAssignment) -> Result<()> {
        if let Some(dns) = &self.dns {
            let ret_rev = if assignment.dns_reverse_ref.is_some() {
                dns.update_record(&BasicRecord::reverse(assignment)?)
                    .await?
            } else {
                dns.add_record(&BasicRecord::reverse_to_fwd(assignment)?)
                    .await?
            };
            assignment.dns_reverse = Some(ret_rev.value);
            assignment.dns_reverse_ref = Some(ret_rev.id.context("Record id is missing")?);
        }
        Ok(())
    }

    /// Delete all ip assignments for a given vm
    pub async fn delete_ip_assignments(&self, vm_id: u64) -> Result<()> {
        let ips = self.db.list_vm_ip_assignments(vm_id).await?;
        for mut ip in ips {
            // remove access policy
            self.remove_access_policy(&mut ip).await?;
            // remove dns
            self.remove_ip_dns(&mut ip).await?;
            // save arp/dns changes
            self.db.update_vm_ip_assignment(&ip).await?;
        }
        // mark as deleted
        self.db.delete_vm_ip_assignment(vm_id).await?;

        Ok(())
    }

    async fn save_ip_assignment(&self, assignment: &mut VmIpAssignment) -> Result<()> {
        // apply access policy
        self.update_access_policy(assignment).await?;

        // Add DNS records
        self.update_forward_ip_dns(assignment).await?;
        self.update_reverse_ip_dns(assignment).await?;

        // save to db
        self.db.insert_vm_ip_assignment(assignment).await?;
        Ok(())
    }

    async fn allocate_ips(&self, vm_id: u64) -> Result<Vec<VmIpAssignment>> {
        let vm = self.db.get_vm(vm_id).await?;
        let existing_ips = self.db.list_vm_ip_assignments(vm_id).await?;
        if !existing_ips.is_empty() {
            return Ok(existing_ips);
        }

        // Use random network provisioner
        let network = NetworkProvisioner::new(ProvisionerMethod::Random, self.db.clone());

        let host = self.db.get_host(vm.host_id).await?;
        let ip = network.pick_ip_for_region(host.region_id).await?;
        let mut assignment = VmIpAssignment {
            id: 0,
            vm_id,
            ip_range_id: ip.range_id,
            ip: ip.ip.to_string(),
            deleted: false,
            arp_ref: None,
            dns_forward: None,
            dns_forward_ref: None,
            dns_reverse: None,
            dns_reverse_ref: None,
        };

        self.save_ip_assignment(&mut assignment).await?;
        Ok(vec![assignment])
    }

    /// Do any necessary initialization
    pub async fn init(&self) -> Result<()> {
        let hosts = self.db.list_hosts().await?;
        let images = self.db.list_os_image().await?;
        for host in hosts {
            let client = get_host_client(&host, &self.provisioner_config)?;
            for image in &images {
                if let Err(e) = client.download_os_image(image).await {
                    warn!(
                        "Error downloading image {} on {}: {}",
                        image.url, host.name, e
                    );
                }
            }
        }
        Ok(())
    }

    /// Provision a new VM for a user on the database
    ///
    /// Note:
    /// 1. Does not create a VM on the host machine
    /// 2. Does not assign any IP resources
    pub async fn provision(
        &self,
        user_id: u64,
        template_id: u64,
        image_id: u64,
        ssh_key_id: u64,
        ref_code: Option<String>,
    ) -> Result<Vm> {
        let user = self.db.get_user(user_id).await?;
        let template = self.db.get_vm_template(template_id).await?;
        let image = self.db.get_os_image(image_id).await?;
        let ssh_key = self.db.get_user_ssh_key(ssh_key_id).await?;

        // TODO: cache capacity somewhere
        let cap = HostCapacityService::new(self.db.clone());
        let host = cap.get_host_for_template(template.region_id, &template).await?;

        let pick_disk = if let Some(hd) = host.disks.first() {
            hd
        } else {
            bail!("No host disk found")
        };

        let client = get_host_client(&host.host, &self.provisioner_config)?;
        let mut new_vm = Vm {
            id: 0,
            host_id: host.host.id,
            user_id: user.id,
            image_id: image.id,
            template_id: Some(template.id),
            custom_template_id: None,
            ssh_key_id: ssh_key.id,
            created: Utc::now(),
            expires: Utc::now(),
            disk_id: pick_disk.disk.id,
            mac_address: "NOT FILLED YET".to_string(),
            deleted: false,
            ref_code,
        };

        // ask host client to generate the mac address
        new_vm.mac_address = client.generate_mac(&new_vm).await?;

        let new_id = self.db.insert_vm(&new_vm).await?;
        new_vm.id = new_id;
        Ok(new_vm)
    }

    /// Provision a new VM for a user on the database
    ///
    /// Note:
    /// 1. Does not create a VM on the host machine
    /// 2. Does not assign any IP resources
    pub async fn provision_custom(
        &self,
        user_id: u64,
        template: VmCustomTemplate,
        image_id: u64,
        ssh_key_id: u64,
        ref_code: Option<String>,
    ) -> Result<Vm> {
        let user = self.db.get_user(user_id).await?;
        let pricing = self.db.get_vm_template(template.pricing_id).await?;
        let image = self.db.get_os_image(image_id).await?;
        let ssh_key = self.db.get_user_ssh_key(ssh_key_id).await?;

        // TODO: cache capacity somewhere
        let cap = HostCapacityService::new(self.db.clone());
        let host = cap.get_host_for_template(pricing.region_id, &template).await?;

        let pick_disk = if let Some(hd) = host.disks.first() {
            hd
        } else {
            bail!("No host disk found")
        };

        // insert custom templates
        let template_id = self.db.insert_custom_vm_template(&template).await?;

        let client = get_host_client(&host.host, &self.provisioner_config)?;
        let mut new_vm = Vm {
            id: 0,
            host_id: host.host.id,
            user_id: user.id,
            image_id: image.id,
            template_id: None,
            custom_template_id: Some(template_id),
            ssh_key_id: ssh_key.id,
            created: Utc::now(),
            expires: Utc::now(),
            disk_id: pick_disk.disk.id,
            mac_address: "NOT FILLED YET".to_string(),
            deleted: false,
            ref_code,
        };

        // ask host client to generate the mac address
        new_vm.mac_address = client.generate_mac(&new_vm).await?;

        let new_id = self.db.insert_vm(&new_vm).await?;
        new_vm.id = new_id;
        Ok(new_vm)
    }

    /// Create a renewal payment
    pub async fn renew(&self, vm_id: u64) -> Result<VmPayment> {
        let pe = PricingEngine::new(self.db.clone(), self.rates.clone());

        let price = pe.get_vm_cost(vm_id).await?;
        match price {
            CostResult::Existing(p) => Ok(p),
            CostResult::New {
                msats,
                time_value,
                new_expiry,
                rate,
            } => {
                const INVOICE_EXPIRE: u64 = 600;
                info!("Creating invoice for {vm_id} for {} sats", msats / 1000);
                let invoice = self
                    .node
                    .add_invoice(AddInvoiceRequest {
                        memo: Some(format!("VM renewal {vm_id} to {new_expiry}")),
                        amount: msats,
                        expire: Some(INVOICE_EXPIRE as u32),
                    })
                    .await?;
                let vm_payment = VmPayment {
                    id: hex::decode(invoice.payment_hash)?,
                    vm_id,
                    created: Utc::now(),
                    expires: Utc::now().add(Duration::from_secs(INVOICE_EXPIRE)),
                    amount: msats,
                    invoice: invoice.pr,
                    time_value,
                    is_paid: false,
                    rate,
                    settle_index: None,
                };
                self.db.insert_vm_payment(&vm_payment).await?;

                Ok(vm_payment)
            }
        }
    }

    /// Create a vm on the host as configured by the template
    pub async fn spawn_vm(&self, vm_id: u64) -> Result<()> {
        if self.read_only {
            bail!("Cant spawn VM's in read-only mode")
        }
        // setup network by allocating some IP space
        self.allocate_ips(vm_id).await?;

        // load full info
        let info = FullVmInfo::load(vm_id, self.db.clone()).await?;

        // load host client
        let host = self.db.get_host(info.vm.host_id).await?;
        let client = get_host_client(&host, &self.provisioner_config)?;
        client.create_vm(&info).await?;

        Ok(())
    }

    /// Delete a VM and its associated resources
    pub async fn delete_vm(&self, vm_id: u64) -> Result<()> {
        // host client currently doesn't support delete (proxmox)
        // VM should already be stopped by [Worker]

        self.delete_ip_assignments(vm_id).await?;
        self.db.delete_vm(vm_id).await?;

        Ok(())
    }

    /// Stop a running VM
    pub async fn stop_vm(&self, vm_id: u64) -> Result<()> {
        let vm = self.db.get_vm(vm_id).await?;
        let host = self.db.get_host(vm.host_id).await?;

        let client = get_host_client(&host, &self.provisioner_config)?;
        client.stop_vm(&vm).await?;
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::exchange::DefaultRateCache;
    use crate::mocks::{MockDb, MockDnsServer, MockNode, MockRouter};
    use crate::settings::{DnsServerConfig, LightningConfig, QemuConfig, RouterConfig};
    use lnvps_db::{DiskInterface, DiskType, User, UserSshKey, VmTemplate};
    use std::net::IpAddr;
    use std::str::FromStr;

    const ROUTER_BRIDGE: &str = "bridge1";

    fn settings() -> Settings {
        Settings {
            listen: None,
            db: "".to_string(),
            lightning: LightningConfig::LND {
                url: "".to_string(),
                cert: Default::default(),
                macaroon: Default::default(),
            },
            read_only: false,
            provisioner: ProvisionerConfig::Proxmox {
                qemu: QemuConfig {
                    machine: "q35".to_string(),
                    os_type: "l26".to_string(),
                    bridge: "vmbr1".to_string(),
                    cpu: "kvm64".to_string(),
                    vlan: None,
                    kvm: false,
                },
                ssh: None,
                mac_prefix: Some("ff:ff:ff".to_string()),
            },
            network_policy: NetworkPolicy {
                access: NetworkAccessPolicy::StaticArp {
                    interface: ROUTER_BRIDGE.to_string(),
                },
                ip6_slaac: None,
            },
            delete_after: 0,
            smtp: None,
            router: Some(RouterConfig::Mikrotik {
                url: "https://localhost".to_string(),
                username: "admin".to_string(),
                password: "password123".to_string(),
            }),
            dns: Some(DnsServerConfig::Cloudflare {
                token: "abc".to_string(),
                forward_zone_id: "123".to_string(),
                reverse_zone_id: "456".to_string(),
            }),
            nostr: None,
        }
    }

    async fn add_user(db: &Arc<MockDb>) -> Result<(User, UserSshKey)> {
        let pubkey: [u8; 32] = rand::random();

        let user_id = db.upsert_user(&pubkey).await?;
        let mut new_key = UserSshKey {
            id: 0,
            name: "test-key".to_string(),
            user_id,
            created: Default::default(),
            key_data: "ssh-rsa AAA==".to_string(),
        };
        let ssh_key = db.insert_user_ssh_key(&new_key).await?;
        new_key.id = ssh_key;
        Ok((db.get_user(user_id).await?, new_key))
    }

    #[tokio::test]
    async fn basic() -> Result<()> {
        let settings = settings();
        let db = Arc::new(MockDb::default());
        let node = Arc::new(MockNode::default());
        let rates = Arc::new(DefaultRateCache::default());
        let router = MockRouter::new(settings.network_policy.clone());
        let dns = MockDnsServer::new();
        let provisioner = LNVpsProvisioner::new(settings, db.clone(), node.clone(), rates.clone());

        let (user, ssh_key) = add_user(&db).await?;
        let vm = provisioner
            .provision(user.id, 1, 1, ssh_key.id, Some("mock-ref".to_string()))
            .await?;
        println!("{:?}", vm);
        provisioner.spawn_vm(vm.id).await?;

        // check resources
        let arp = router.list_arp_entry().await?;
        assert_eq!(1, arp.len());
        let arp = arp.first().unwrap();
        assert_eq!(&vm.mac_address, &arp.mac_address);
        assert_eq!(vm.ref_code, Some("mock-ref".to_string()));
        assert_eq!(ROUTER_BRIDGE, arp.interface.as_ref().unwrap());
        println!("{:?}", arp);

        let ips = db.list_vm_ip_assignments(vm.id).await?;
        assert_eq!(1, ips.len());
        let ip = ips.first().unwrap();
        println!("{:?}", ip);
        assert_eq!(ip.ip, arp.address);
        assert_eq!(ip.ip_range_id, 1);
        assert_eq!(ip.vm_id, vm.id);
        assert!(ip.dns_forward.is_some());
        assert!(ip.dns_reverse.is_some());
        assert!(ip.dns_reverse_ref.is_some());
        assert!(ip.dns_forward_ref.is_some());
        assert_eq!(ip.dns_reverse, ip.dns_forward);

        // assert IP address is not CIDR
        assert!(IpAddr::from_str(&ip.ip).is_ok());
        assert!(!ip.ip.ends_with("/8"));
        assert!(!ip.ip.ends_with("/24"));

        // now expire
        provisioner.delete_vm(vm.id).await?;

        // test arp/dns is removed
        let arp = router.list_arp_entry().await?;
        assert!(arp.is_empty());
        assert_eq!(dns.forward.lock().await.len(), 0);
        assert_eq!(dns.reverse.lock().await.len(), 0);

        // ensure IPS are deleted
        let ips = db.ip_assignments.lock().await;
        let ip = ips.values().next().unwrap();
        assert!(ip.arp_ref.is_none());
        assert!(ip.dns_forward.is_none());
        assert!(ip.dns_reverse.is_none());
        assert!(ip.dns_reverse_ref.is_none());
        assert!(ip.dns_forward_ref.is_none());
        assert!(ip.deleted);
        println!("{:?}", ip);

        Ok(())
    }

    #[tokio::test]
    async fn test_no_capacity() -> Result<()> {
        let settings = settings();
        let db = Arc::new(MockDb::default());
        let node = Arc::new(MockNode::default());
        let rates = Arc::new(DefaultRateCache::default());
        let prov = LNVpsProvisioner::new(settings.clone(), db.clone(), node.clone(), rates.clone());

        let large_template = VmTemplate {
            id: 0,
            name: "mock-large-template".to_string(),
            enabled: true,
            created: Default::default(),
            expires: None,
            cpu: 64,
            memory: 512 * MockDb::GB,
            disk_size: 20 * MockDb::TB,
            disk_type: DiskType::SSD,
            disk_interface: DiskInterface::PCIe,
            cost_plan_id: 1,
            region_id: 1,
        };
        let id = db.insert_vm_template(&large_template).await?;

        let (user, ssh_key) = add_user(&db).await?;

        let prov = prov.provision(user.id, id, 1, ssh_key.id, None).await;
        assert!(prov.is_err());
        if let Err(e) = prov {
            println!("{}", e);
            assert!(e.to_string().to_lowercase().contains("no available host"))
        }
        Ok(())
    }
}
@ -1,135 +0,0 @@
use anyhow::{bail, Result};
use ipnetwork::IpNetwork;
use lnvps_db::LNVpsDb;
use rand::prelude::IteratorRandom;
use std::collections::HashSet;
use std::net::IpAddr;
use std::sync::Arc;

#[derive(Debug, Clone, Copy)]
pub enum ProvisionerMethod {
    Sequential,
    Random,
}

#[derive(Debug, Clone, Copy)]
pub struct AvailableIp {
    pub ip: IpAddr,
    pub gateway: IpNetwork,
    pub range_id: u64,
    pub region_id: u64,
}

/// Handles picking available IPs
#[derive(Clone)]
pub struct NetworkProvisioner {
    method: ProvisionerMethod,
    db: Arc<dyn LNVpsDb>,
}

impl NetworkProvisioner {
    pub fn new(method: ProvisionerMethod, db: Arc<dyn LNVpsDb>) -> Self {
        Self { method, db }
    }

    /// Pick an IP from one of the available ip ranges
    /// This method MUST return a free IP which can be used
    pub async fn pick_ip_for_region(&self, region_id: u64) -> Result<AvailableIp> {
        let ip_ranges = self.db.list_ip_range_in_region(region_id).await?;
        if ip_ranges.is_empty() {
            bail!("No ip range found in this region");
        }

        for range in ip_ranges {
            let range_cidr: IpNetwork = range.cidr.parse()?;
            let ips = self.db.list_vm_ip_assignments_in_range(range.id).await?;
            let mut ips: HashSet<IpAddr> = ips.iter().map_while(|i| i.ip.parse().ok()).collect();

            let gateway: IpNetwork = range.gateway.parse()?;

            // mark some IPS as always used
            // Namely:
            // .0 & .255 of /24 (first and last)
            // gateway ip of the range
            ips.insert(range_cidr.iter().next().unwrap());
            ips.insert(range_cidr.iter().last().unwrap());
            ips.insert(gateway.ip());

            // pick an IP at random
            let ip_pick = {
                match self.method {
                    ProvisionerMethod::Sequential => range_cidr.iter().find(|i| !ips.contains(i)),
                    ProvisionerMethod::Random => {
                        let mut rng = rand::rng();
                        loop {
                            if let Some(i) = range_cidr.iter().choose(&mut rng) {
                                if !ips.contains(&i) {
                                    break Some(i);
                                }
                            } else {
                                break None;
                            }
                        }
                    }
                }
            };

            if let Some(ip_pick) = ip_pick {
                return Ok(AvailableIp {
                    range_id: range.id,
                    gateway,
                    ip: ip_pick,
                    region_id,
                });
            }
        }
        bail!("No IPs available in this region");
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::mocks::*;

    use lnvps_db::VmIpAssignment;
    use std::str::FromStr;

    #[tokio::test]
    async fn pick_seq_ip_for_region_test() {
        let db: Arc<dyn LNVpsDb> = Arc::new(MockDb::default());
        let mgr = NetworkProvisioner::new(ProvisionerMethod::Sequential, db.clone());

        let gateway = IpNetwork::from_str("10.0.0.1/8").unwrap();
        let first = IpAddr::from_str("10.0.0.2").unwrap();
        let second = IpAddr::from_str("10.0.0.3").unwrap();
        let ip = mgr.pick_ip_for_region(1).await.expect("No ip found in db");
        assert_eq!(1, ip.region_id);
        assert_eq!(first, ip.ip);
        assert_eq!(gateway, ip.gateway);

        let ip = mgr.pick_ip_for_region(1).await.expect("No ip found in db");
        assert_eq!(1, ip.region_id);
        assert_eq!(first, ip.ip);
        db.insert_vm_ip_assignment(&VmIpAssignment {
            id: 0,
            vm_id: 0,
            ip_range_id: ip.range_id,
            ip: ip.ip.to_string(),
            ..Default::default()
        })
        .await
        .expect("Could not insert vm ip");
        let ip = mgr.pick_ip_for_region(1).await.expect("No ip found in db");
        assert_eq!(second, ip.ip);
    }

    #[tokio::test]
    async fn pick_rng_ip_for_region_test() {
        let db: Arc<dyn LNVpsDb> = Arc::new(MockDb::default());
        let mgr = NetworkProvisioner::new(ProvisionerMethod::Random, db);

        let ip = mgr.pick_ip_for_region(1).await.expect("No ip found in db");
        assert_eq!(1, ip.region_id);
    }
}