Compare commits

42 commits — prices ... e2d6d84439

| SHA1 |
|---|
| e2d6d84439 |
| ea6499558d |
| 1bb03762bd |
| 603099e947 |
| d18f32e897 |
| f4b8f88772 |
| c4373b78d0 |
| 39ca5ee8b4 |
| cd7c7cd7be |
| c570222e8a |
| 32fc16dca2 |
| a57c85fa2c |
| 4bf8b06337 |
| 2505082a59 |
| ec7fa92010 |
| af36d4e586 |
| 9106221204 |
| 0b51a5ecee |
| 6850b786cf |
| cb71ba8bc6 |
| b6356636de |
| 9fb4a38e72 |
| cbafca8da7 |
| 9ee4232706 |
| 39622315be |
| b190fcdd1c |
| 6b12a9bddb |
| 6de4471861 |
| 3527742992 |
| be4a981bea |
| f934bb3132 |
| 6c7ae6ac89 |
| 5c57abb9c1 |
| 2d55392050 |
| 02d606d60c |
| 029f2cb6e1 |
| 45dd0c4398 |
| 1c282e460f |
| a2e08c5965 |
| b9f21c09bd |
| d94ca9e1bb |
| 9606b91e6d |
757 Cargo.lock (generated)
File diff suppressed because it is too large
21 Cargo.toml

@ -7,22 +7,23 @@ edition = "2021"
name = "api"

[features]
-default = ["mikrotik", "nostr-dm", "proxmox", "lnd", "cloudflare"]
+default = ["mikrotik", "nostr-dm", "nostr-dvm", "proxmox", "lnd", "cloudflare", "revolut", "bitvora"]
mikrotik = ["dep:reqwest"]
nostr-dm = ["dep:nostr-sdk"]
+nostr-dvm = ["dep:nostr-sdk"]
proxmox = ["dep:reqwest", "dep:ssh2", "dep:tokio-tungstenite"]
libvirt = ["dep:virt"]
lnd = ["dep:fedimint-tonic-lnd"]
bitvora = ["dep:reqwest", "dep:tokio-stream"]
cloudflare = ["dep:reqwest"]
revolut = ["dep:reqwest", "dep:sha2", "dep:hmac"]

[dependencies]
lnvps_db = { path = "lnvps_db" }
-tokio = { version = "1.37.0", features = ["rt", "rt-multi-thread", "macros", "sync"] }
+tokio = { version = "1.37.0", features = ["rt", "rt-multi-thread", "macros", "sync", "io-util"] }
anyhow = "1.0.83"
config = { version = "0.15.8", features = ["yaml"] }
log = "0.4.21"
fern = "0.7.1"
serde = { version = "1.0.213", features = ["derive"] }
serde_json = "1.0.132"
rocket = { version = "0.5.1", features = ["json"] }
@ -36,14 +37,15 @@ rand = "0.9.0"
clap = { version = "4.5.21", features = ["derive"] }
ssh-key = "0.6.7"
lettre = { version = "0.11.10", features = ["tokio1-native-tls"] }
-ws = { package = "rocket_ws", version = "0.1.0" }
+ws = { package = "rocket_ws", version = "0.1.1" }
native-tls = "0.2.12"
hex = "0.4.3"
futures = "0.3.31"
+isocountry = "0.3.2"

#nostr-dm
-nostr = { version = "0.39.0", default-features = false, features = ["std"] }
-nostr-sdk = { version = "0.39.0", optional = true, default-features = false, features = ["nip44", "nip59"] }
+nostr = { version = "0.40.0", default-features = false, features = ["std"] }
+nostr-sdk = { version = "0.40.0", optional = true, default-features = false, features = ["nip44", "nip59"] }

#proxmox
tokio-tungstenite = { version = "^0.21", features = ["native-tls"], optional = true }
@ -57,4 +59,9 @@ virt = { version = "0.4.2", optional = true }
fedimint-tonic-lnd = { version = "0.2.0", default-features = false, features = ["invoicesrpc"], optional = true }

#bitvora
tokio-stream = { version = "0.1.17", features = ["sync"], optional = true }

+#revolut
+sha2 = { version = "0.10.8", optional = true }
+hmac = { version = "0.12.1", optional = true }
+env_logger = "0.11.7"
81 README.md

@ -2,13 +2,22 @@

A bitcoin powered VPS system.

-## Requirements
+## Features

-- MySql database
-- Lightning node:
-  - LND
-  - [Bitvora](https://bitvora.com?r=lnvps)
-- Proxmox server
+- MySQL database
+- Payments:
+  - Bitcoin:
+    - LND
+    - [Bitvora](https://bitvora.com?r=lnvps)
+  - Fiat:
+    - [RevolutPay](https://www.revolut.com/business/revolut-pay/)
+- VM Backend:
+  - Proxmox
+- Network Resources:
+  - Mikrotik JSON-API
+  - OVH API (dedicated server virtual mac)
+- DNS Resources:
+  - Cloudflare API

## Required Config

@ -43,15 +52,7 @@ provisioner:
  os-type: "l26"
  bridge: "vmbr0"
  cpu: "kvm64"
  vlan: 100
  kvm: false

-# Networking policy
-network-policy:
-  # Configure network equipment on provisioning IP resources
-  access: "auto"
-  # Use SLAAC to auto-configure VM ipv6 addresses
-  ip6-slaac: true
```

### Email notifications
@ -89,42 +90,28 @@ nostr:

### Network Setup (Advanced)

When ARP is disabled (reply-only) on your router you may need to create static ARP entries when allocating
IPs, we support managing ARP entries on routers directly as part of the provisioning process.

-```yaml
-# (Optional)
-# When allocating IPs for VM's it may be necessary to create static ARP entries on
-# your router, at least one router can be configured
-#
-# Currently supports: Mikrotik
-router:
-  mikrotik:
-    # !! MAKE SURE TO USE HTTPS !!
-    url: "https://my-router.net"
-    username: "admin"
-    password: "admin"
-network-policy:
-  # How packets get to the VM
-  # (default "auto", nothing to do, packets will always arrive)
-  access:
-    # Static ARP entries are added to the router for each provisioned IP
-    static-arp:
-      # Interface where the static ARP entry is added
-      interface: "bridge1"
-```
+**TODO:** AccessPolicy is now managed in the database

### DNS (PTR/A/AAAA)

To create PTR records automatically use the following config:
```yaml
dns:
  cloudflare:
    # The zone containing the reverse domain (eg. X.Y.Z.in-addr.arpa)
    reverse-zone-id: "my-reverse-zone-id"
-    # The zone where forward (A/AAAA) entries are added (eg. lnvps.cloud zone)
-    # We create forward entries with the format vm-<vmid>.lnvps.cloud
-    forward-zone-id: "my-forward-zone-id"
-    # API token to add/remove DNS records to this zone
-    token: "my-api-token"
-```
+    # The zone where forward (A/AAAA) entries are added (eg. lnvps.cloud zone)
+    # We create forward entries with the format vm-<vmid>.lnvps.cloud
+    forward-zone-id: "my-forward-zone-id"
+api:
+  cloudflare:
+    # API token to add/remove DNS records to this zone
+    token: "my-api-token"
+```

### Taxes
To charge taxes add the following config, the values are percentage whole numbers:
```yaml
tax-rate:
  IE: 23
  US: 15
```

Taxes are charged based on the users specified country
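The tax charged on a payment is stored per payment (see the `tax` column added by the `20250311135933_taxes.sql` migration later in this diff). As a rough sketch only, not the project's actual pricing code, a whole-number percentage rate keyed by country could be applied like this (`tax_for` is a hypothetical helper):

```rust
use std::collections::HashMap;

/// Hypothetical helper: apply a whole-number percentage tax rate to an amount,
/// mirroring the `tax-rate` config above. Returns 0 when the user has no
/// country set or no rate is configured for that country.
fn tax_for(amount: u64, country_code: Option<&str>, tax_rates: &HashMap<String, u64>) -> u64 {
    country_code
        .and_then(|c| tax_rates.get(c))
        .map(|rate| amount * rate / 100)
        .unwrap_or(0)
}

fn main() {
    let mut rates = HashMap::new();
    rates.insert("IE".to_string(), 23);
    rates.insert("US".to_string(), 15);
    // 23% of 10_000 = 2_300
    assert_eq!(tax_for(10_000, Some("IE"), &rates), 2_300);
    // no country on the account: no tax charged
    assert_eq!(tax_for(10_000, None, &rates), 0);
}
```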
@ -5,6 +5,7 @@ lightning:
  cert: "/home/kieran/.polar/networks/2/volumes/lnd/alice/tls.cert"
  macaroon: "/home/kieran/.polar/networks/2/volumes/lnd/alice/data/chain/bitcoin/regtest/admin.macaroon"
delete-after: 3
public-url: "https://api.lnvps.net"
provisioner:
  proxmox:
    read-only: false
@ -2,7 +2,7 @@ volumes:
  db:
services:
  db:
-    image: mariadb
+    image: docker.io/mariadb
    restart: unless-stopped
    environment:
      - "MARIADB_ROOT_PASSWORD=root"
700 grafana.json (new file)
@ -0,0 +1,700 @@
|
||||
{
|
||||
"annotations": {
|
||||
"list": [
|
||||
{
|
||||
"builtIn": 1,
|
||||
"datasource": {
|
||||
"type": "grafana",
|
||||
"uid": "-- Grafana --"
|
||||
},
|
||||
"enable": true,
|
||||
"hide": true,
|
||||
"iconColor": "rgba(0, 211, 255, 1)",
|
||||
"name": "Annotations & Alerts",
|
||||
"type": "dashboard"
|
||||
}
|
||||
]
|
||||
},
|
||||
"editable": true,
|
||||
"fiscalYearStartMonth": 0,
|
||||
"graphTooltip": 0,
|
||||
"id": 1,
|
||||
"links": [],
|
||||
"panels": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "mysql",
|
||||
"uid": "cegjfe9u9181sf"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "thresholds"
|
||||
},
|
||||
"custom": {
|
||||
"align": "left",
|
||||
"cellOptions": {
|
||||
"type": "auto"
|
||||
},
|
||||
"filterable": false,
|
||||
"inspect": false
|
||||
},
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "cpu"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 62
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "gb_memory"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 88
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "gb_ssd"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 81
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "gb_hdd"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 75
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "load_factor"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 93
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "sold_cpu"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 87
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "sold_gb_memory"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 133
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "sold_gb_ssd_disk"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 141
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "name"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 205
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 5,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 0
|
||||
},
|
||||
"id": 3,
|
||||
"options": {
|
||||
"cellHeight": "sm",
|
||||
"footer": {
|
||||
"countRows": false,
|
||||
"fields": "",
|
||||
"reducer": [
|
||||
"sum"
|
||||
],
|
||||
"show": false
|
||||
},
|
||||
"showHeader": true,
|
||||
"sortBy": []
|
||||
},
|
||||
"pluginVersion": "11.5.2",
|
||||
"targets": [
|
||||
{
|
||||
"dataset": "lnvps",
|
||||
"editorMode": "code",
|
||||
"format": "table",
|
||||
"rawQuery": true,
|
||||
"rawSql": "SELECT \nh.name,\nh.cpu,\nh.memory / 1024 / 1024 / 1024 gb_memory,\n(select sum(size) from vm_host_disk hd where hd.host_id = h.id and hd.enabled = 1 and hd.kind = 1) / 1024 / 1024 / 1024 gb_ssd,\n(select sum(size) from vm_host_disk hd where hd.host_id = h.id and hd.enabled = 1 and hd.kind = 0) / 1024 / 1024 / 1024 gb_hdd,\n(select sum(case when v.template_id is null then (select cpu from vm_custom_template vct where vct.id = v.custom_template_id) else (select cpu from vm_template vt where vt.id = v.template_id) end) from vm v where v.host_id = h.id and expires > current_timestamp()) sold_cpu,\n(select sum(case when v.template_id is null then (select memory from vm_custom_template vct where vct.id = v.custom_template_id) else (select memory from vm_template vt where vt.id = v.template_id) end) from vm v where v.host_id = h.id and expires > current_timestamp()) / 1024 / 1024 / 1024 sold_gb_memory,\n(select sum(case when v.template_id is null then (select disk_size from vm_custom_template vct where vct.id = v.custom_template_id and vct.disk_type = 1) else (select disk_size from vm_template vt where vt.id = v.template_id and vt.disk_type = 1) end) from vm v where v.host_id = h.id and expires > current_timestamp()) / 1024 / 1024 / 1024 sold_gb_ssd_disk,\n(select sum(case when v.template_id is null then (select disk_size from vm_custom_template vct where vct.id = v.custom_template_id and vct.disk_type = 0) else (select disk_size from vm_template vt where vt.id = v.template_id and vt.disk_type = 0) end) from vm v where v.host_id = h.id and expires > current_timestamp()) / 1024 / 1024 / 1024 sold_gb_hdd_disk,\n(select sum(case when v.template_id is null then 0 else (select (case when cp.currency = 'BTC' then (cp.amount/1e9) else cp.amount end) from vm_template vt,vm_cost_plan cp where vt.id = v.template_id and vt.cost_plan_id = cp.id) end) from vm v where v.host_id = h.id and deleted = 0) income\nfrom vm_host h",
|
||||
"refId": "A",
|
||||
"sql": {
|
||||
"columns": [
|
||||
{
|
||||
"parameters": [],
|
||||
"type": "function"
|
||||
}
|
||||
],
|
||||
"groupBy": [
|
||||
{
|
||||
"property": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "groupBy"
|
||||
}
|
||||
],
|
||||
"limit": 50
|
||||
}
|
||||
}
|
||||
],
|
||||
"title": "Host Allocation",
|
||||
"type": "table"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "mysql",
|
||||
"uid": "cegjfe9u9181sf"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "thresholds"
|
||||
},
|
||||
"custom": {
|
||||
"align": "auto",
|
||||
"cellOptions": {
|
||||
"type": "auto"
|
||||
},
|
||||
"inspect": false
|
||||
},
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 13,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 5
|
||||
},
|
||||
"id": 2,
|
||||
"options": {
|
||||
"cellHeight": "sm",
|
||||
"footer": {
|
||||
"countRows": false,
|
||||
"fields": "",
|
||||
"reducer": [
|
||||
"sum"
|
||||
],
|
||||
"show": false
|
||||
},
|
||||
"showHeader": true
|
||||
},
|
||||
"pluginVersion": "11.5.2",
|
||||
"targets": [
|
||||
{
|
||||
"dataset": "lnvps",
|
||||
"editorMode": "code",
|
||||
"format": "table",
|
||||
"rawQuery": true,
|
||||
"rawSql": "select\nv.id,\ndatediff(current_timestamp(), v.created) age,\ndatediff(v.expires, current_timestamp()) days_to_expire,\n(select sum((case when currency = 'BTC' then amount / 1e11 else amount end) * rate) from lnvps.vm_payment where vm_id = v.id and is_paid = 1) total_payments\nfrom vm v\nwhere v.deleted = 0\norder by 3 asc",
|
||||
"refId": "A",
|
||||
"sql": {
|
||||
"columns": [
|
||||
{
|
||||
"parameters": [],
|
||||
"type": "function"
|
||||
}
|
||||
],
|
||||
"groupBy": [
|
||||
{
|
||||
"property": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "groupBy"
|
||||
}
|
||||
],
|
||||
"limit": 50
|
||||
}
|
||||
}
|
||||
],
|
||||
"title": "Renewals",
|
||||
"type": "table"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "mysql",
|
||||
"uid": "cegjfe9u9181sf"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "thresholds"
|
||||
},
|
||||
"custom": {
|
||||
"align": "auto",
|
||||
"cellOptions": {
|
||||
"type": "auto"
|
||||
},
|
||||
"inspect": false
|
||||
},
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "id"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 49
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "ref_code"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 91
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "created"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "amount"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 71
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "currency"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 78
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 5
|
||||
},
|
||||
"id": 4,
|
||||
"options": {
|
||||
"cellHeight": "sm",
|
||||
"footer": {
|
||||
"countRows": false,
|
||||
"fields": "",
|
||||
"reducer": [
|
||||
"sum"
|
||||
],
|
||||
"show": false
|
||||
},
|
||||
"showHeader": true,
|
||||
"sortBy": []
|
||||
},
|
||||
"pluginVersion": "11.5.2",
|
||||
"targets": [
|
||||
{
|
||||
"dataset": "lnvps",
|
||||
"editorMode": "code",
|
||||
"format": "table",
|
||||
"rawQuery": true,
|
||||
"rawSql": "select v.id, \nv.ref_code, v.created, \n(case when vp.currency = 'BTC' then vp.amount / 1000 else vp.amount / 100 end) amount,\nvp.currency,\n(case when vp.currency = 'BTC' then vp.amount / 1000 else vp.amount / 100 end) * 0.33 comission\nfrom vm v, vm_payment vp\nwhere v.ref_code is not null\nand v.id = vp.vm_id\nand vp.is_paid = 1\norder by vp.created desc",
|
||||
"refId": "A",
|
||||
"sql": {
|
||||
"columns": [
|
||||
{
|
||||
"parameters": [],
|
||||
"type": "function"
|
||||
}
|
||||
],
|
||||
"groupBy": [
|
||||
{
|
||||
"property": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "groupBy"
|
||||
}
|
||||
],
|
||||
"limit": 50
|
||||
}
|
||||
}
|
||||
],
|
||||
"title": "RefCodes",
|
||||
"type": "table"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "mysql",
|
||||
"uid": "cegjfe9u9181sf"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "thresholds"
|
||||
},
|
||||
"custom": {
|
||||
"align": "auto",
|
||||
"cellOptions": {
|
||||
"type": "auto"
|
||||
},
|
||||
"inspect": false
|
||||
},
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 10,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 13
|
||||
},
|
||||
"id": 5,
|
||||
"options": {
|
||||
"cellHeight": "sm",
|
||||
"footer": {
|
||||
"countRows": false,
|
||||
"fields": "",
|
||||
"reducer": [
|
||||
"sum"
|
||||
],
|
||||
"show": false
|
||||
},
|
||||
"showHeader": true
|
||||
},
|
||||
"pluginVersion": "11.5.2",
|
||||
"targets": [
|
||||
{
|
||||
"dataset": "lnvps",
|
||||
"editorMode": "code",
|
||||
"format": "table",
|
||||
"rawQuery": true,
|
||||
"rawSql": "select vm_id, created, \n(case when currency = 'BTC' then (amount / 1e3) else amount / 100 end) amount, \n(case when currency = 'BTC' then (amount / 1e11) * rate else amount * rate end) amount_eur,\ncurrency,\n(case when payment_method = 0 then 'LN' else 'Revolut' end) method\nfrom vm_payment\nwhere is_paid = 1\norder by created desc\nlimit 20",
|
||||
"refId": "A",
|
||||
"sql": {
|
||||
"columns": [
|
||||
{
|
||||
"parameters": [],
|
||||
"type": "function"
|
||||
}
|
||||
],
|
||||
"groupBy": [
|
||||
{
|
||||
"property": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "groupBy"
|
||||
}
|
||||
],
|
||||
"limit": 50
|
||||
}
|
||||
}
|
||||
],
|
||||
"title": "Payments",
|
||||
"type": "table"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "mysql",
|
||||
"uid": "cegjfe9u9181sf"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "thresholds"
|
||||
},
|
||||
"custom": {
|
||||
"align": "auto",
|
||||
"cellOptions": {
|
||||
"type": "auto"
|
||||
},
|
||||
"inspect": false
|
||||
},
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "free"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "unit",
|
||||
"value": "percentunit"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "region"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 70
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "used"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 59
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "size"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 70
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "size"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "unit",
|
||||
"value": "sishort"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 5,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 18
|
||||
},
|
||||
"id": 1,
|
||||
"options": {
|
||||
"cellHeight": "sm",
|
||||
"footer": {
|
||||
"countRows": false,
|
||||
"fields": "",
|
||||
"reducer": [
|
||||
"sum"
|
||||
],
|
||||
"show": false
|
||||
},
|
||||
"showHeader": true,
|
||||
"sortBy": []
|
||||
},
|
||||
"pluginVersion": "11.5.2",
|
||||
"targets": [
|
||||
{
|
||||
"dataset": "lnvps",
|
||||
"datasource": {
|
||||
"type": "mysql",
|
||||
"uid": "cegjfe9u9181sf"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"format": "table",
|
||||
"rawQuery": true,
|
||||
"rawSql": "select i.cidr, i.region, i.used, i.size, (1-i.used/i.size) as free\nfrom (\nselect r.cidr, \n(select count(id) from lnvps.vm_ip_assignment where ip_range_id = r.id and deleted = 0) used,\nhr.name as region,\npow(2, (case when r.cidr like '%:%' then 128 else 32 end)-substring_index(r.cidr, '/', -1)) as size\nfrom ip_range r, vm_host_region hr\nwhere r.region_id = hr.id) i",
|
||||
"refId": "A",
|
||||
"sql": {
|
||||
"columns": [
|
||||
{
|
||||
"parameters": [],
|
||||
"type": "function"
|
||||
}
|
||||
],
|
||||
"groupBy": [
|
||||
{
|
||||
"property": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "groupBy"
|
||||
}
|
||||
],
|
||||
"limit": 50
|
||||
}
|
||||
}
|
||||
],
|
||||
"title": "IP Ranges",
|
||||
"type": "table"
|
||||
}
|
||||
],
|
||||
"preload": false,
|
||||
"refresh": "",
|
||||
"schemaVersion": 40,
|
||||
"tags": [],
|
||||
"templating": {
|
||||
"list": []
|
||||
},
|
||||
"time": {
|
||||
"from": "now-6h",
|
||||
"to": "now"
|
||||
},
|
||||
"timepicker": {},
|
||||
"timezone": "browser",
|
||||
"title": "LNVPS",
|
||||
"uid": "begjfxfrjwu80e",
|
||||
"version": 23,
|
||||
"weekStart": ""
|
||||
}
|
455 lnvps_db/Cargo.lock (generated)
File diff suppressed because it is too large
6 lnvps_db/migrations/20250310153305_fiat_payment.sql (new file)
@ -0,0 +1,6 @@
alter table vm_payment
    add column currency varchar(5) not null default 'BTC',
    add column payment_method smallint unsigned not null default 0,
    add column external_id varchar(255),
    change invoice external_data varchar (4096) NOT NULL,
    drop column settle_index;
4 lnvps_db/migrations/20250311135933_taxes.sql (new file)
@ -0,0 +1,4 @@
alter table vm_payment
    add column tax bigint unsigned not null;
alter table users
    add column country_code varchar(3) not null default 'USA';
5 lnvps_db/migrations/20250313140640_empty_country.sql (new file)
@ -0,0 +1,5 @@
alter table users
    change column country_code country_code varchar (3);
-- assume country_code was not actually set until now
update users
set country_code = null;
5 lnvps_db/migrations/20250324143556_load_factors.sql (new file)
@ -0,0 +1,5 @@
-- Add migration script here
alter table vm_host
    add column load_memory float not null default 1.0,
    add column load_disk float not null default 1.0,
    change column load_factor load_cpu float not null default 1.0
23 lnvps_db/migrations/20250325113115_extend_ip_range.sql (new file)
@ -0,0 +1,23 @@
create table router
(
    id        integer unsigned not null auto_increment primary key,
    name      varchar(100) not null,
    enabled   bit(1) not null,
    kind      smallint unsigned not null,
    url       varchar(255) not null,
    token     varchar(128) not null
);
create table access_policy
(
    id        integer unsigned not null auto_increment primary key,
    name      varchar(100) not null,
    kind      smallint unsigned not null,
    router_id integer unsigned,
    interface varchar(100),
    constraint fk_access_policy_router foreign key (router_id) references router (id)
);
alter table ip_range
    add column reverse_zone_id varchar(255),
    add column access_policy_id integer unsigned;
alter table ip_range
    add constraint fk_ip_range_access_policy foreign key (access_policy_id) references access_policy (id);
@ -0,0 +1,2 @@
alter table vm_host
    add column vlan_id integer unsigned;
@ -0,0 +1,3 @@
alter table ip_range
    add column allocation_mode smallint unsigned not null default 0,
    add column use_full_range bit(1) not null;
@ -38,9 +38,15 @@ pub trait LNVpsDb: Sync + Send {
    /// List a users ssh keys
    async fn list_user_ssh_key(&self, user_id: u64) -> Result<Vec<UserSshKey>>;

    /// Get VM host regions
    async fn list_host_region(&self) -> Result<Vec<VmHostRegion>>;

    /// Get VM host region by id
    async fn get_host_region(&self, id: u64) -> Result<VmHostRegion>;

    /// Get VM host region by name
    async fn get_host_region_by_name(&self, name: &str) -> Result<VmHostRegion>;

    /// List VM's owned by a specific user
    async fn list_hosts(&self) -> Result<Vec<VmHost>>;

@ -56,6 +62,9 @@ pub trait LNVpsDb: Sync + Send {
    /// Get a specific host disk
    async fn get_host_disk(&self, disk_id: u64) -> Result<VmHostDisk>;

    /// Update a host disk
    async fn update_host_disk(&self, disk: &VmHostDisk) -> Result<()>;

    /// Get OS image by id
    async fn get_os_image(&self, id: u64) -> Result<VmOsImage>;

@ -131,6 +140,9 @@ pub trait LNVpsDb: Sync + Send {
    /// Get VM payment by payment id
    async fn get_vm_payment(&self, id: &Vec<u8>) -> Result<VmPayment>;

    /// Get VM payment by payment id
    async fn get_vm_payment_by_ext_id(&self, id: &str) -> Result<VmPayment>;

    /// Update a VM payment record
    async fn update_vm_payment(&self, vm_payment: &VmPayment) -> Result<()>;

@ -154,4 +166,10 @@ pub trait LNVpsDb: Sync + Send {

    /// Return the list of disk prices for a given custom pricing model
    async fn list_custom_pricing_disk(&self, pricing_id: u64) -> Result<Vec<VmCustomPricingDisk>>;

    /// Get router config
    async fn get_router(&self, router_id: u64) -> Result<Router>;

    /// Get access policy
    async fn get_access_policy(&self, access_policy_id: u64) -> Result<AccessPolicy>;
}
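The `get_router` and `get_access_policy` methods added here back the database-managed AccessPolicy that replaces the static `router`/`network-policy` README config above. A minimal sketch of how a provisioner might resolve the router for an IP range's static-ARP policy, assuming only the types and trait methods shown in this diff (the helper and its import paths are illustrative, not part of the codebase):

```rust
use anyhow::Result;
use lnvps_db::{AccessPolicy, IpRange, LNVpsDb, NetworkAccessPolicy, Router};

/// Illustrative helper: find the router (if any) that should receive static
/// ARP entries for IPs allocated from this range.
async fn arp_router_for_range(db: &dyn LNVpsDb, range: &IpRange) -> Result<Option<Router>> {
    let policy_id = match range.access_policy_id {
        Some(id) => id,
        // no access policy configured for this range
        None => return Ok(None),
    };
    let policy: AccessPolicy = db.get_access_policy(policy_id).await?;
    match policy.kind {
        // StaticArp policies reference the router where entries are managed
        NetworkAccessPolicy::StaticArp => match policy.router_id {
            Some(router_id) => Ok(Some(db.get_router(router_id).await?)),
            None => Ok(None),
        },
    }
}
```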
@ -1,8 +1,9 @@
|
||||
use anyhow::{anyhow, Result};
|
||||
use anyhow::{anyhow, bail, Result};
|
||||
use chrono::{DateTime, Utc};
|
||||
use sqlx::FromRow;
|
||||
use sqlx::{FromRow, Type};
|
||||
use std::fmt::{Display, Formatter};
|
||||
use std::path::PathBuf;
|
||||
use std::str::FromStr;
|
||||
use url::Url;
|
||||
|
||||
#[derive(FromRow, Clone, Debug)]
|
||||
@ -20,6 +21,8 @@ pub struct User {
|
||||
pub contact_nip17: bool,
|
||||
/// If user should be contacted via email for notifications
|
||||
pub contact_email: bool,
|
||||
/// Users country
|
||||
pub country_code: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(FromRow, Clone, Debug, Default)]
|
||||
@ -77,8 +80,14 @@ pub struct VmHost {
|
||||
pub enabled: bool,
|
||||
/// API token used to control this host via [ip]
|
||||
pub api_token: String,
|
||||
/// Load factor for provisioning
|
||||
pub load_factor: f32,
|
||||
/// CPU load factor for provisioning
|
||||
pub load_cpu: f32,
|
||||
/// Memory load factor
|
||||
pub load_memory: f32,
|
||||
/// Disk load factor
|
||||
pub load_disk: f32,
|
||||
/// VLAN id assigned to all vms on the host
|
||||
pub vlan_id: Option<u64>,
|
||||
}
|
||||
|
||||
#[derive(FromRow, Clone, Debug, Default)]
|
||||
@ -100,6 +109,27 @@ pub enum DiskType {
|
||||
SSD = 1,
|
||||
}
|
||||
|
||||
impl FromStr for DiskType {
|
||||
type Err = anyhow::Error;
|
||||
|
||||
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
|
||||
match s.to_lowercase().as_str() {
|
||||
"hdd" => Ok(DiskType::HDD),
|
||||
"ssd" => Ok(DiskType::SSD),
|
||||
_ => Err(anyhow!("unknown disk type {}", s)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for DiskType {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
DiskType::HDD => write!(f, "hdd"),
|
||||
DiskType::SSD => write!(f, "ssd"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, sqlx::Type, Default, PartialEq, Eq)]
|
||||
#[repr(u16)]
|
||||
pub enum DiskInterface {
|
||||
@ -109,6 +139,29 @@ pub enum DiskInterface {
|
||||
PCIe = 2,
|
||||
}
|
||||
|
||||
impl FromStr for DiskInterface {
|
||||
type Err = anyhow::Error;
|
||||
|
||||
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
|
||||
match s.to_lowercase().as_str() {
|
||||
"sata" => Ok(DiskInterface::SATA),
|
||||
"scsi" => Ok(DiskInterface::SCSI),
|
||||
"pcie" => Ok(DiskInterface::PCIe),
|
||||
_ => Err(anyhow!("unknown disk interface {}", s)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for DiskInterface {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
DiskInterface::SATA => write!(f, "sata"),
|
||||
DiskInterface::SCSI => write!(f, "scsi"),
|
||||
DiskInterface::PCIe => write!(f, "pcie"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, sqlx::Type, Default, PartialEq, Eq)]
|
||||
#[repr(u16)]
|
||||
pub enum OsDistribution {
|
||||
@ -123,6 +176,24 @@ pub enum OsDistribution {
|
||||
RedHatEnterprise = 7,
|
||||
}
|
||||
|
||||
impl FromStr for OsDistribution {
|
||||
type Err = anyhow::Error;
|
||||
|
||||
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
|
||||
match s.to_lowercase().as_str() {
|
||||
"ubuntu" => Ok(OsDistribution::Ubuntu),
|
||||
"debian" => Ok(OsDistribution::Debian),
|
||||
"centos" => Ok(OsDistribution::CentOS),
|
||||
"fedora" => Ok(OsDistribution::Fedora),
|
||||
"freebsd" => Ok(OsDistribution::FreeBSD),
|
||||
"opensuse" => Ok(OsDistribution::OpenSUSE),
|
||||
"archlinux" => Ok(OsDistribution::ArchLinux),
|
||||
"redhatenterprise" => Ok(OsDistribution::RedHatEnterprise),
|
||||
_ => Err(anyhow!("unknown distribution {}", s)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// OS Images are templates which are used as a basis for
|
||||
/// provisioning new vms
|
||||
#[derive(FromRow, Clone, Debug)]
|
||||
@ -158,12 +229,68 @@ impl Display for VmOsImage {
|
||||
}
|
||||
|
||||
#[derive(FromRow, Clone, Debug)]
|
||||
pub struct Router {
|
||||
pub id: u64,
|
||||
pub name: String,
|
||||
pub enabled: bool,
|
||||
pub kind: RouterKind,
|
||||
pub url: String,
|
||||
pub token: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, sqlx::Type)]
|
||||
#[repr(u16)]
|
||||
pub enum RouterKind {
|
||||
/// Mikrotik router (JSON-Api)
|
||||
Mikrotik = 0,
|
||||
/// A pseudo-router which allows adding virtual mac addresses to a dedicated server
|
||||
OvhAdditionalIp = 1,
|
||||
}
|
||||
|
||||
#[derive(FromRow, Clone, Debug, Default)]
|
||||
pub struct IpRange {
|
||||
pub id: u64,
|
||||
pub cidr: String,
|
||||
pub gateway: String,
|
||||
pub enabled: bool,
|
||||
pub region_id: u64,
|
||||
pub reverse_zone_id: Option<String>,
|
||||
pub access_policy_id: Option<u64>,
|
||||
pub allocation_mode: IpRangeAllocationMode,
|
||||
/// Use all IPs in the range, including first and last
|
||||
pub use_full_range: bool,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, sqlx::Type, Default)]
|
||||
#[repr(u16)]
|
||||
/// How ips are allocated from this range
|
||||
pub enum IpRangeAllocationMode {
|
||||
/// IPs are assigned in a random order
|
||||
Random = 0,
|
||||
#[default]
|
||||
/// IPs are assigned in sequential order
|
||||
Sequential = 1,
|
||||
/// IP(v6) assignment uses SLAAC EUI-64
|
||||
SlaacEui64 = 2,
|
||||
}
|
||||
|
||||
#[derive(FromRow, Clone, Debug)]
|
||||
pub struct AccessPolicy {
|
||||
pub id: u64,
|
||||
pub name: String,
|
||||
pub kind: NetworkAccessPolicy,
|
||||
/// Router used to apply this network access policy
|
||||
pub router_id: Option<u64>,
|
||||
/// Interface name used to apply this policy
|
||||
pub interface: Option<String>,
|
||||
}
|
||||
|
||||
/// Policy that determines how packets arrive at the VM
|
||||
#[derive(Debug, Clone, sqlx::Type)]
|
||||
#[repr(u16)]
|
||||
pub enum NetworkAccessPolicy {
|
||||
/// ARP entries are added statically on the access router
|
||||
StaticArp = 0,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, sqlx::Type)]
|
||||
@ -309,17 +436,55 @@ impl Display for VmIpAssignment {
|
||||
|
||||
#[derive(FromRow, Clone, Debug, Default)]
|
||||
pub struct VmPayment {
|
||||
/// Payment hash
|
||||
pub id: Vec<u8>,
|
||||
pub vm_id: u64,
|
||||
pub created: DateTime<Utc>,
|
||||
pub expires: DateTime<Utc>,
|
||||
pub amount: u64,
|
||||
pub invoice: String,
|
||||
pub currency: String,
|
||||
pub payment_method: PaymentMethod,
|
||||
/// External data (invoice / json)
|
||||
pub external_data: String,
|
||||
/// External id on other system
|
||||
pub external_id: Option<String>,
|
||||
pub is_paid: bool,
|
||||
/// Exchange rate
|
||||
/// TODO: handle other base currencies
|
||||
/// Exchange rate back to base currency (EUR)
|
||||
pub rate: f32,
|
||||
/// Number of seconds this payment will add to vm expiry
|
||||
pub time_value: u64,
|
||||
pub settle_index: Option<u64>,
|
||||
/// Taxes to charge on payment
|
||||
pub tax: u64,
|
||||
}
|
||||
|
||||
#[derive(Type, Clone, Copy, Debug, Default, PartialEq)]
|
||||
#[repr(u16)]
|
||||
pub enum PaymentMethod {
|
||||
#[default]
|
||||
Lightning,
|
||||
Revolut,
|
||||
Paypal,
|
||||
}
|
||||
|
||||
impl Display for PaymentMethod {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
PaymentMethod::Lightning => write!(f, "Lightning"),
|
||||
PaymentMethod::Revolut => write!(f, "Revolut"),
|
||||
PaymentMethod::Paypal => write!(f, "PayPal"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl FromStr for PaymentMethod {
|
||||
type Err = anyhow::Error;
|
||||
|
||||
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
|
||||
match s {
|
||||
"lightning" => Ok(PaymentMethod::Lightning),
|
||||
"revolut" => Ok(PaymentMethod::Revolut),
|
||||
"paypal" => Ok(PaymentMethod::Paypal),
|
||||
_ => bail!("Unknown payment method: {}", s),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,7 +1,7 @@
|
||||
use crate::{
|
||||
IpRange, LNVpsDb, User, UserSshKey, Vm, VmCostPlan, VmCustomPricing, VmCustomPricingDisk,
|
||||
VmCustomTemplate, VmHost, VmHostDisk, VmHostRegion, VmIpAssignment, VmOsImage, VmPayment,
|
||||
VmTemplate,
|
||||
AccessPolicy, IpRange, LNVpsDb, Router, User, UserSshKey, Vm, VmCostPlan, VmCustomPricing,
|
||||
VmCustomPricingDisk, VmCustomTemplate, VmHost, VmHostDisk, VmHostRegion, VmIpAssignment,
|
||||
VmOsImage, VmPayment, VmTemplate,
|
||||
};
|
||||
use anyhow::{bail, Error, Result};
|
||||
use async_trait::async_trait;
|
||||
@ -60,14 +60,15 @@ impl LNVpsDb for LNVpsDbMysql {
|
||||
|
||||
async fn update_user(&self, user: &User) -> Result<()> {
|
||||
sqlx::query(
|
||||
"update users set email = ?, contact_nip17 = ?, contact_email = ? where id = ?",
|
||||
"update users set email=?, contact_nip17=?, contact_email=?, country_code=? where id = ?",
|
||||
)
|
||||
.bind(&user.email)
|
||||
.bind(user.contact_nip17)
|
||||
.bind(user.contact_email)
|
||||
.bind(user.id)
|
||||
.execute(&self.db)
|
||||
.await?;
|
||||
.bind(&user.email)
|
||||
.bind(user.contact_nip17)
|
||||
.bind(user.contact_email)
|
||||
.bind(&user.country_code)
|
||||
.bind(user.id)
|
||||
.execute(&self.db)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@ -108,6 +109,13 @@ impl LNVpsDb for LNVpsDbMysql {
|
||||
.map_err(Error::new)
|
||||
}
|
||||
|
||||
async fn list_host_region(&self) -> Result<Vec<VmHostRegion>> {
|
||||
sqlx::query_as("select * from vm_host_region where enabled=1")
|
||||
.fetch_all(&self.db)
|
||||
.await
|
||||
.map_err(Error::new)
|
||||
}
|
||||
|
||||
async fn get_host_region(&self, id: u64) -> Result<VmHostRegion> {
|
||||
sqlx::query_as("select * from vm_host_region where id=?")
|
||||
.bind(id)
|
||||
@ -116,8 +124,16 @@ impl LNVpsDb for LNVpsDbMysql {
|
||||
.map_err(Error::new)
|
||||
}
|
||||
|
||||
async fn get_host_region_by_name(&self, name: &str) -> Result<VmHostRegion> {
|
||||
sqlx::query_as("select * from vm_host_region where name like ?")
|
||||
.bind(name)
|
||||
.fetch_one(&self.db)
|
||||
.await
|
||||
.map_err(Error::new)
|
||||
}
|
||||
|
||||
async fn list_hosts(&self) -> Result<Vec<VmHost>> {
|
||||
sqlx::query_as("select * from vm_host where enabled = 1")
|
||||
sqlx::query_as("select h.* from vm_host h,vm_host_region hr where h.enabled = 1 and h.region_id = hr.id and hr.enabled = 1")
|
||||
.fetch_all(&self.db)
|
||||
.await
|
||||
.map_err(Error::new)
|
||||
@ -158,6 +174,18 @@ impl LNVpsDb for LNVpsDbMysql {
|
||||
.map_err(Error::new)
|
||||
}
|
||||
|
||||
async fn update_host_disk(&self, disk: &VmHostDisk) -> Result<()> {
|
||||
sqlx::query("update vm_host_disk set size=?,kind=?,interface=? where id=?")
|
||||
.bind(disk.size)
|
||||
.bind(disk.kind)
|
||||
.bind(disk.interface)
|
||||
.bind(disk.id)
|
||||
.execute(&self.db)
|
||||
.await
|
||||
.map_err(Error::new)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn get_os_image(&self, id: u64) -> Result<VmOsImage> {
|
||||
sqlx::query_as("select * from vm_os_image where id=?")
|
||||
.bind(id)
|
||||
@ -306,17 +334,18 @@ impl LNVpsDb for LNVpsDbMysql {
|
||||
|
||||
async fn update_vm(&self, vm: &Vm) -> Result<()> {
|
||||
sqlx::query(
|
||||
"update vm set image_id=?,template_id=?,ssh_key_id=?,expires=?,disk_id=? where id=?",
|
||||
"update vm set image_id=?,template_id=?,ssh_key_id=?,expires=?,disk_id=?,mac_address=? where id=?",
|
||||
)
|
||||
.bind(vm.image_id)
|
||||
.bind(vm.template_id)
|
||||
.bind(vm.ssh_key_id)
|
||||
.bind(vm.expires)
|
||||
.bind(vm.disk_id)
|
||||
.bind(vm.id)
|
||||
.execute(&self.db)
|
||||
.await
|
||||
.map_err(Error::new)?;
|
||||
.bind(vm.image_id)
|
||||
.bind(vm.template_id)
|
||||
.bind(vm.ssh_key_id)
|
||||
.bind(vm.expires)
|
||||
.bind(vm.disk_id)
|
||||
.bind(&vm.mac_address)
|
||||
.bind(vm.id)
|
||||
.execute(&self.db)
|
||||
.await
|
||||
.map_err(Error::new)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@ -324,18 +353,18 @@ impl LNVpsDb for LNVpsDbMysql {
|
||||
Ok(sqlx::query(
|
||||
"insert into vm_ip_assignment(vm_id,ip_range_id,ip,arp_ref,dns_forward,dns_forward_ref,dns_reverse,dns_reverse_ref) values(?,?,?,?,?,?,?,?) returning id",
|
||||
)
|
||||
.bind(ip_assignment.vm_id)
|
||||
.bind(ip_assignment.ip_range_id)
|
||||
.bind(&ip_assignment.ip)
|
||||
.bind(&ip_assignment.arp_ref)
|
||||
.bind(&ip_assignment.dns_forward)
|
||||
.bind(&ip_assignment.dns_forward_ref)
|
||||
.bind(&ip_assignment.dns_reverse)
|
||||
.bind(&ip_assignment.dns_reverse_ref)
|
||||
.fetch_one(&self.db)
|
||||
.await
|
||||
.map_err(Error::new)?
|
||||
.try_get(0)?)
|
||||
.bind(ip_assignment.vm_id)
|
||||
.bind(ip_assignment.ip_range_id)
|
||||
.bind(&ip_assignment.ip)
|
||||
.bind(&ip_assignment.arp_ref)
|
||||
.bind(&ip_assignment.dns_forward)
|
||||
.bind(&ip_assignment.dns_forward_ref)
|
||||
.bind(&ip_assignment.dns_reverse)
|
||||
.bind(&ip_assignment.dns_reverse_ref)
|
||||
.fetch_one(&self.db)
|
||||
.await
|
||||
.map_err(Error::new)?
|
||||
.try_get(0)?)
|
||||
}
|
||||
|
||||
async fn update_vm_ip_assignment(&self, ip_assignment: &VmIpAssignment) -> Result<()> {
|
||||
@ -387,16 +416,20 @@ impl LNVpsDb for LNVpsDbMysql {
|
||||
}
|
||||
|
||||
async fn insert_vm_payment(&self, vm_payment: &VmPayment) -> Result<()> {
|
||||
sqlx::query("insert into vm_payment(id,vm_id,created,expires,amount,invoice,time_value,is_paid,rate) values(?,?,?,?,?,?,?,?,?)")
|
||||
sqlx::query("insert into vm_payment(id,vm_id,created,expires,amount,tax,currency,payment_method,time_value,is_paid,rate,external_id,external_data) values(?,?,?,?,?,?,?,?,?,?,?,?,?)")
|
||||
.bind(&vm_payment.id)
|
||||
.bind(vm_payment.vm_id)
|
||||
.bind(vm_payment.created)
|
||||
.bind(vm_payment.expires)
|
||||
.bind(vm_payment.amount)
|
||||
.bind(&vm_payment.invoice)
|
||||
.bind(vm_payment.tax)
|
||||
.bind(&vm_payment.currency)
|
||||
.bind(&vm_payment.payment_method)
|
||||
.bind(vm_payment.time_value)
|
||||
.bind(vm_payment.is_paid)
|
||||
.bind(vm_payment.rate)
|
||||
.bind(&vm_payment.external_id)
|
||||
.bind(&vm_payment.external_data)
|
||||
.execute(&self.db)
|
||||
.await
|
||||
.map_err(Error::new)?;
|
||||
@ -411,6 +444,14 @@ impl LNVpsDb for LNVpsDbMysql {
|
||||
.map_err(Error::new)
|
||||
}
|
||||
|
||||
async fn get_vm_payment_by_ext_id(&self, id: &str) -> Result<VmPayment> {
|
||||
sqlx::query_as("select * from vm_payment where external_id=?")
|
||||
.bind(id)
|
||||
.fetch_one(&self.db)
|
||||
.await
|
||||
.map_err(Error::new)
|
||||
}
|
||||
|
||||
async fn update_vm_payment(&self, vm_payment: &VmPayment) -> Result<()> {
|
||||
sqlx::query("update vm_payment set is_paid = ? where id = ?")
|
||||
.bind(vm_payment.is_paid)
|
||||
@ -428,8 +469,8 @@ impl LNVpsDb for LNVpsDbMysql {
|
||||
|
||||
let mut tx = self.db.begin().await?;
|
||||
|
||||
sqlx::query("update vm_payment set is_paid = true, settle_index = ? where id = ?")
|
||||
.bind(vm_payment.settle_index)
|
||||
sqlx::query("update vm_payment set is_paid = true, external_data = ? where id = ?")
|
||||
.bind(&vm_payment.external_data)
|
||||
.bind(&vm_payment.id)
|
||||
.execute(&mut *tx)
|
||||
.await?;
|
||||
@ -446,7 +487,7 @@ impl LNVpsDb for LNVpsDbMysql {
|
||||
|
||||
async fn last_paid_invoice(&self) -> Result<Option<VmPayment>> {
|
||||
sqlx::query_as(
|
||||
"select * from vm_payment where is_paid = true order by settle_index desc limit 1",
|
||||
"select * from vm_payment where is_paid = true order by created desc limit 1",
|
||||
)
|
||||
.fetch_optional(&self.db)
|
||||
.await
|
||||
@ -498,4 +539,20 @@ impl LNVpsDb for LNVpsDbMysql {
|
||||
.await
|
||||
.map_err(Error::new)
|
||||
}
|
||||
|
||||
async fn get_router(&self, router_id: u64) -> Result<Router> {
|
||||
sqlx::query_as("select * from router where id=?")
|
||||
.bind(router_id)
|
||||
.fetch_one(&self.db)
|
||||
.await
|
||||
.map_err(Error::new)
|
||||
}
|
||||
|
||||
async fn get_access_policy(&self, access_policy_id: u64) -> Result<AccessPolicy> {
|
||||
sqlx::query_as("select * from access_policy where id=?")
|
||||
.bind(access_policy_id)
|
||||
.fetch_one(&self.db)
|
||||
.await
|
||||
.map_err(Error::new)
|
||||
}
|
||||
}
|
||||
|
@ -10,4 +10,5 @@ pub fn routes() -> Vec<Route> {
    r
}

pub use webhook::WebhookMessage;
pub use webhook::WEBHOOK_BRIDGE;
@ -1,16 +1,17 @@
|
||||
use crate::exchange::{alt_prices, Currency, CurrencyAmount, ExchangeRateService};
|
||||
use crate::provisioner::{PricingData, PricingEngine};
|
||||
use crate::provisioner::PricingEngine;
|
||||
use crate::status::VmState;
|
||||
use anyhow::{anyhow, bail, Context, Result};
|
||||
use anyhow::{anyhow, bail, Result};
|
||||
use chrono::{DateTime, Utc};
|
||||
use ipnetwork::IpNetwork;
|
||||
use lnvps_db::{
|
||||
LNVpsDb, Vm, VmCostPlan, VmCustomPricing, VmCustomPricingDisk, VmCustomTemplate, VmHost,
|
||||
LNVpsDb, PaymentMethod, Vm, VmCostPlan, VmCustomPricing, VmCustomPricingDisk, VmCustomTemplate,
|
||||
VmHostRegion, VmTemplate,
|
||||
};
|
||||
use nostr::util::hex;
|
||||
use schemars::JsonSchema;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
|
||||
@ -95,9 +96,9 @@ impl From<lnvps_db::DiskType> for DiskType {
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<lnvps_db::DiskType> for DiskType {
|
||||
fn into(self) -> lnvps_db::DiskType {
|
||||
match self {
|
||||
impl From<DiskType> for lnvps_db::DiskType {
|
||||
fn from(val: DiskType) -> Self {
|
||||
match val {
|
||||
DiskType::HDD => lnvps_db::DiskType::HDD,
|
||||
DiskType::SSD => lnvps_db::DiskType::SSD,
|
||||
}
|
||||
@ -143,12 +144,13 @@ impl ApiTemplatesResponse {
|
||||
pub async fn expand_pricing(&mut self, rates: &Arc<dyn ExchangeRateService>) -> Result<()> {
|
||||
let rates = rates.list_rates().await?;
|
||||
|
||||
for mut template in &mut self.templates {
|
||||
let list_price = CurrencyAmount(template.cost_plan.currency, template.cost_plan.amount);
|
||||
for template in &mut self.templates {
|
||||
let list_price =
|
||||
CurrencyAmount::from_f32(template.cost_plan.currency, template.cost_plan.amount);
|
||||
for alt_price in alt_prices(&rates, list_price) {
|
||||
template.cost_plan.other_price.push(ApiPrice {
|
||||
currency: alt_price.0,
|
||||
amount: alt_price.1,
|
||||
amount: alt_price.value_f32(),
|
||||
});
|
||||
}
|
||||
}
|
||||
@ -252,7 +254,7 @@ impl From<CurrencyAmount> for ApiPrice {
|
||||
fn from(value: CurrencyAmount) -> Self {
|
||||
Self {
|
||||
currency: value.0,
|
||||
amount: value.1,
|
||||
amount: value.value_f32(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -335,8 +337,8 @@ impl ApiVmTemplate {
|
||||
cpu: template.cpu,
|
||||
memory: template.memory,
|
||||
disk_size: template.disk_size,
|
||||
disk_type: template.disk_type.clone().into(),
|
||||
disk_interface: template.disk_interface.clone().into(),
|
||||
disk_type: template.disk_type.into(),
|
||||
disk_interface: template.disk_interface.into(),
|
||||
cost_plan: ApiVmCostPlan {
|
||||
id: cost_plan.id,
|
||||
name: cost_plan.name.clone(),
|
||||
@ -402,6 +404,7 @@ pub struct AccountPatchRequest {
|
||||
pub email: Option<String>,
|
||||
pub contact_nip17: bool,
|
||||
pub contact_email: bool,
|
||||
pub country_code: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, JsonSchema)]
|
||||
@ -468,14 +471,15 @@ impl From<lnvps_db::VmOsImage> for ApiVmOsImage {
|
||||
|
||||
#[derive(Serialize, Deserialize, JsonSchema)]
|
||||
pub struct ApiVmPayment {
|
||||
/// Payment hash hex
|
||||
pub id: String,
|
||||
pub vm_id: u64,
|
||||
pub created: DateTime<Utc>,
|
||||
pub expires: DateTime<Utc>,
|
||||
pub amount: u64,
|
||||
pub invoice: String,
|
||||
pub tax: u64,
|
||||
pub currency: String,
|
||||
pub is_paid: bool,
|
||||
pub data: ApiPaymentData,
|
||||
}
|
||||
|
||||
impl From<lnvps_db::VmPayment> for ApiVmPayment {
|
||||
@ -486,8 +490,65 @@ impl From<lnvps_db::VmPayment> for ApiVmPayment {
|
||||
created: value.created,
|
||||
expires: value.expires,
|
||||
amount: value.amount,
|
||||
invoice: value.invoice,
|
||||
tax: value.tax,
|
||||
currency: value.currency,
|
||||
is_paid: value.is_paid,
|
||||
data: match &value.payment_method {
|
||||
PaymentMethod::Lightning => ApiPaymentData::Lightning(value.external_data),
|
||||
PaymentMethod::Revolut => {
|
||||
#[derive(Deserialize)]
|
||||
struct RevolutData {
|
||||
pub token: String,
|
||||
}
|
||||
let data: RevolutData = serde_json::from_str(&value.external_data).unwrap();
|
||||
ApiPaymentData::Revolut { token: data.token }
|
||||
}
|
||||
PaymentMethod::Paypal => {
|
||||
todo!()
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, JsonSchema)]
|
||||
pub struct ApiPaymentInfo {
|
||||
pub name: ApiPaymentMethod,
|
||||
|
||||
#[serde(skip_serializing_if = "HashMap::is_empty")]
|
||||
pub metadata: HashMap<String, String>,
|
||||
|
||||
pub currencies: Vec<Currency>,
|
||||
}
|
||||
|
||||
/// Payment data related to the payment method
|
||||
#[derive(Serialize, Deserialize, JsonSchema)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum ApiPaymentData {
|
||||
/// Just an LN invoice
|
||||
Lightning(String),
|
||||
/// Revolut order data
|
||||
Revolut {
|
||||
/// Order token
|
||||
token: String,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, Default, Serialize, Deserialize, JsonSchema)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum ApiPaymentMethod {
|
||||
#[default]
|
||||
Lightning,
|
||||
Revolut,
|
||||
Paypal,
|
||||
}
|
||||
|
||||
impl From<PaymentMethod> for ApiPaymentMethod {
|
||||
fn from(value: PaymentMethod) -> Self {
|
||||
match value {
|
||||
PaymentMethod::Lightning => ApiPaymentMethod::Lightning,
|
||||
PaymentMethod::Revolut => ApiPaymentMethod::Revolut,
|
||||
PaymentMethod::Paypal => ApiPaymentMethod::Paypal,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,23 +1,27 @@
|
||||
use crate::api::model::{
|
||||
AccountPatchRequest, ApiCustomTemplateDiskParam, ApiCustomTemplateParams, ApiCustomVmOrder,
|
||||
ApiCustomVmRequest, ApiPrice, ApiTemplatesResponse, ApiUserSshKey, ApiVmHostRegion,
|
||||
AccountPatchRequest, ApiCustomTemplateParams, ApiCustomVmOrder, ApiCustomVmRequest,
|
||||
ApiPaymentInfo, ApiPaymentMethod, ApiPrice, ApiTemplatesResponse, ApiUserSshKey,
|
||||
ApiVmIpAssignment, ApiVmOsImage, ApiVmPayment, ApiVmStatus, ApiVmTemplate, CreateSshKey,
|
||||
CreateVmRequest, VMPatchRequest,
|
||||
};
|
||||
use crate::exchange::ExchangeRateService;
|
||||
use crate::exchange::{Currency, ExchangeRateService};
|
||||
use crate::host::{get_host_client, FullVmInfo, TimeSeries, TimeSeriesData};
|
||||
use crate::nip98::Nip98Auth;
|
||||
use crate::provisioner::{HostCapacityService, LNVpsProvisioner, PricingEngine};
|
||||
use crate::settings::Settings;
|
||||
use crate::status::{VmState, VmStateCache};
|
||||
use crate::worker::WorkJob;
|
||||
use anyhow::{Context, Result};
|
||||
use anyhow::{bail, Result};
|
||||
use futures::future::join_all;
|
||||
use lnvps_db::{IpRange, LNVpsDb, VmCustomPricing, VmCustomPricingDisk, VmCustomTemplate};
|
||||
use futures::{SinkExt, StreamExt};
|
||||
use isocountry::CountryCode;
|
||||
use lnvps_db::{
|
||||
IpRange, LNVpsDb, PaymentMethod, VmCustomPricing, VmCustomPricingDisk, VmCustomTemplate,
|
||||
};
|
||||
use log::{error, info};
|
||||
use nostr::util::hex;
|
||||
use rocket::futures::{SinkExt, StreamExt};
|
||||
use rocket::serde::json::Json;
|
||||
use rocket::{get, patch, post, Responder, Route, State};
|
||||
use rocket::{get, patch, post, routes, Responder, Route, State};
|
||||
use rocket_okapi::gen::OpenApiGenerator;
|
||||
use rocket_okapi::okapi::openapi3::Responses;
|
||||
use rocket_okapi::response::OpenApiResponderInner;
|
||||
@ -26,11 +30,15 @@ use schemars::JsonSchema;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use ssh_key::PublicKey;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::fmt::Display;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::mpsc::UnboundedSender;
|
||||
use tokio::sync::mpsc::{Sender, UnboundedSender};
|
||||
|
||||
pub fn routes() -> Vec<Route> {
|
||||
openapi_get_routes![
|
||||
let mut routes = vec![];
|
||||
|
||||
routes.append(&mut openapi_get_routes![
|
||||
v1_get_account,
|
||||
v1_patch_account,
|
||||
v1_list_vms,
|
||||
@ -45,11 +53,17 @@ pub fn routes() -> Vec<Route> {
|
||||
v1_start_vm,
|
||||
v1_stop_vm,
|
||||
v1_restart_vm,
|
||||
v1_reinstall_vm,
|
||||
v1_patch_vm,
|
||||
v1_time_series,
|
||||
v1_custom_template_calc,
|
||||
v1_create_custom_vm_order
|
||||
]
|
||||
v1_create_custom_vm_order,
|
||||
v1_get_payment_methods
|
||||
]);
|
||||
|
||||
routes.append(&mut routes![v1_terminal_proxy]);
|
||||
|
||||
routes
|
||||
}
|
||||
|
||||
type ApiResult<T> = Result<Json<ApiData<T>>, ApiError>;
|
||||
@ -103,6 +117,11 @@ async fn v1_patch_account(
|
||||
user.email = req.email.clone();
|
||||
user.contact_nip17 = req.contact_nip17;
|
||||
user.contact_email = req.contact_email;
|
||||
user.country_code = req
|
||||
.country_code
|
||||
.as_ref()
|
||||
.and_then(|c| CountryCode::for_alpha3(c).ok())
|
||||
.map(|c| c.alpha3().to_string());
|
||||
|
||||
db.update_user(&user).await?;
|
||||
ApiData::ok(())
|
||||
@ -123,6 +142,7 @@ async fn v1_get_account(
|
||||
email: user.email,
|
||||
contact_nip17: user.contact_nip17,
|
||||
contact_email: user.contact_email,
|
||||
country_code: user.country_code,
|
||||
})
|
||||
}
|
||||
|
||||
@ -143,7 +163,7 @@ async fn vm_to_status(
|
||||
.map(|i| (i.id, i))
|
||||
.collect();
|
||||
|
||||
let template = ApiVmTemplate::from_vm(&db, &vm).await?;
|
||||
let template = ApiVmTemplate::from_vm(db, &vm).await?;
|
||||
Ok(ApiVmStatus {
|
||||
id: vm.id,
|
||||
created: vm.created,
|
||||
@ -309,7 +329,7 @@ async fn v1_list_vm_templates(
|
||||
})
|
||||
.collect();
|
||||
let custom_templates: Vec<VmCustomPricing> =
|
||||
join_all(regions.iter().map(|(k, _)| db.list_custom_pricing(*k)))
|
||||
join_all(regions.keys().map(|k| db.list_custom_pricing(*k)))
|
||||
.await
|
||||
.into_iter()
|
||||
.filter_map(|r| r.ok())
|
||||
@ -344,17 +364,15 @@ async fn v1_list_vm_templates(
|
||||
.into_iter()
|
||||
.filter_map(|t| {
|
||||
let region = regions.get(&t.region_id)?;
|
||||
Some(
|
||||
ApiCustomTemplateParams::from(
|
||||
&t,
|
||||
&custom_template_disks,
|
||||
region,
|
||||
max_cpu,
|
||||
max_memory,
|
||||
max_disk,
|
||||
)
|
||||
.ok()?,
|
||||
ApiCustomTemplateParams::from(
|
||||
&t,
|
||||
&custom_template_disks,
|
||||
region,
|
||||
max_cpu,
|
||||
max_memory,
|
||||
max_disk,
|
||||
)
|
||||
.ok()
|
||||
})
|
||||
.collect(),
|
||||
)
|
||||
@ -376,7 +394,7 @@ async fn v1_custom_template_calc(
|
||||
|
||||
let price = PricingEngine::get_custom_vm_cost_amount(db, 0, &template).await?;
|
||||
ApiData::ok(ApiPrice {
|
||||
currency: price.currency.clone(),
|
||||
currency: price.currency,
|
||||
amount: price.total(),
|
||||
})
|
||||
}
|
||||
@ -484,12 +502,13 @@ async fn v1_create_vm_order(
|
||||
|
||||
/// Renew(Extend) a VM
|
||||
#[openapi(tag = "VM")]
|
||||
#[get("/api/v1/vm/<id>/renew")]
|
||||
#[get("/api/v1/vm/<id>/renew?<method>")]
|
||||
async fn v1_renew_vm(
|
||||
auth: Nip98Auth,
|
||||
db: &State<Arc<dyn LNVpsDb>>,
|
||||
provisioner: &State<Arc<LNVpsProvisioner>>,
|
||||
id: u64,
|
||||
method: Option<&str>,
|
||||
) -> ApiResult<ApiVmPayment> {
|
||||
let pubkey = auth.event.pubkey.to_bytes();
|
||||
let uid = db.upsert_user(&pubkey).await?;
|
||||
@ -498,7 +517,14 @@ async fn v1_renew_vm(
|
||||
return ApiData::err("VM does not belong to you");
|
||||
}
|
||||
|
||||
let rsp = provisioner.renew(id).await?;
|
||||
let rsp = provisioner
|
||||
.renew(
|
||||
id,
|
||||
method
|
||||
.and_then(|m| PaymentMethod::from_str(m).ok())
|
||||
.unwrap_or(PaymentMethod::Lightning),
|
||||
)
|
||||
.await?;
|
||||
ApiData::ok(rsp.into())
|
||||
}
|
||||
|
||||
@ -576,6 +602,32 @@ async fn v1_restart_vm(
|
||||
ApiData::ok(())
|
||||
}
|
||||
|
||||
/// Re-install a VM
|
||||
#[openapi(tag = "VM")]
|
||||
#[patch("/api/v1/vm/<id>/re-install")]
|
||||
async fn v1_reinstall_vm(
|
||||
auth: Nip98Auth,
|
||||
db: &State<Arc<dyn LNVpsDb>>,
|
||||
settings: &State<Settings>,
|
||||
worker: &State<UnboundedSender<WorkJob>>,
|
||||
id: u64,
|
||||
) -> ApiResult<()> {
|
||||
let pubkey = auth.event.pubkey.to_bytes();
|
||||
let uid = db.upsert_user(&pubkey).await?;
|
||||
let vm = db.get_vm(id).await?;
|
||||
if uid != vm.user_id {
|
||||
return ApiData::err("VM does not belong to you");
|
||||
}
|
||||
|
||||
let host = db.get_host(vm.host_id).await?;
|
||||
let client = get_host_client(&host, &settings.provisioner)?;
|
||||
let info = FullVmInfo::load(vm.id, (*db).clone()).await?;
|
||||
client.reinstall_vm(&info).await?;
|
||||
|
||||
worker.send(WorkJob::CheckVm { vm_id: id })?;
|
||||
ApiData::ok(())
|
||||
}
|
||||
|
||||
#[openapi(tag = "VM")]
|
||||
#[get("/api/v1/vm/<id>/time-series")]
|
||||
async fn v1_time_series(
|
||||
@ -596,6 +648,134 @@ async fn v1_time_series(
|
||||
ApiData::ok(client.get_time_series_data(&vm, TimeSeries::Hourly).await?)
|
||||
}
|
||||
|
||||
#[get("/api/v1/vm/<id>/console?<auth>")]
|
||||
async fn v1_terminal_proxy(
|
||||
auth: &str,
|
||||
db: &State<Arc<dyn LNVpsDb>>,
|
||||
settings: &State<Settings>,
|
||||
id: u64,
|
||||
ws: ws::WebSocket,
|
||||
) -> Result<ws::Channel<'static>, &'static str> {
|
||||
return Err("Disabled");
|
||||
let auth = Nip98Auth::from_base64(auth).map_err(|e| "Missing or invalid auth param")?;
|
||||
if auth
|
||||
.check(&format!("/api/v1/vm/{id}/console"), "GET")
|
||||
.is_err()
|
||||
{
|
||||
return Err("Invalid auth event");
|
||||
}
|
||||
let pubkey = auth.event.pubkey.to_bytes();
|
||||
let uid = db.upsert_user(&pubkey).await.map_err(|_| "Insert failed")?;
|
||||
let vm = db.get_vm(id).await.map_err(|_| "VM not found")?;
|
||||
if uid != vm.user_id {
|
||||
return Err("VM does not belong to you");
|
||||
}
|
||||
|
||||
let host = db
|
||||
.get_host(vm.host_id)
|
||||
.await
|
||||
.map_err(|_| "VM host not found")?;
|
||||
let client =
|
||||
get_host_client(&host, &settings.provisioner).map_err(|_| "Failed to get host client")?;
|
||||
|
||||
let mut ws_upstream = client.connect_terminal(&vm).await.map_err(|e| {
|
||||
error!("Failed to start terminal proxy: {}", e);
|
||||
"Failed to open terminal proxy"
|
||||
})?;
|
||||
let ws = ws.config(Default::default());
|
||||
Ok(ws.channel(move |mut stream| {
|
||||
use ws::*;
|
||||
|
||||
Box::pin(async move {
|
||||
async fn process_client<E>(
|
||||
msg: Result<Message, E>,
|
||||
ws_upstream: &mut Sender<Vec<u8>>,
|
||||
) -> Result<()>
|
||||
where
|
||||
E: Display,
|
||||
{
|
||||
match msg {
|
||||
Ok(m) => {
|
||||
let m_up = match m {
|
||||
Message::Text(t) => t.as_bytes().to_vec(),
|
||||
_ => panic!("todo"),
|
||||
};
|
||||
if let Err(e) = ws_upstream.send(m_up).await {
|
||||
bail!("Failed to send msg to upstream: {}", e);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
bail!("Failed to read from client: {}", e);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn process_upstream<E>(
|
||||
msg: Result<Vec<u8>, E>,
|
||||
tx_client: &mut stream::DuplexStream,
|
||||
) -> Result<()>
|
||||
where
|
||||
E: Display,
|
||||
{
|
||||
match msg {
|
||||
Ok(m) => {
|
||||
let down = String::from_utf8_lossy(&m).into_owned();
|
||||
let m_down = Message::Text(down);
|
||||
if let Err(e) = tx_client.send(m_down).await {
|
||||
bail!("Failed to msg to client: {}", e);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
bail!("Failed to read from upstream: {}", e);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
loop {
|
||||
tokio::select! {
|
||||
Some(msg) = stream.next() => {
|
||||
if let Err(e) = process_client(msg, &mut ws_upstream.tx).await {
|
||||
error!("{}", e);
|
||||
break;
|
||||
}
|
||||
},
|
||||
Some(r) = ws_upstream.rx.recv() => {
|
||||
let msg: Result<Vec<u8>, anyhow::Error> = Ok(r);
|
||||
if let Err(e) = process_upstream(msg, &mut stream).await {
|
||||
error!("{}", e);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
info!("Websocket closed");
|
||||
Ok(())
|
||||
})
|
||||
}))
|
||||
}
|
||||
|
||||
#[openapi(tag = "Payment")]
|
||||
#[get("/api/v1/payment/methods")]
|
||||
async fn v1_get_payment_methods(settings: &State<Settings>) -> ApiResult<Vec<ApiPaymentInfo>> {
|
||||
let mut ret = vec![ApiPaymentInfo {
|
||||
name: ApiPaymentMethod::Lightning,
|
||||
metadata: HashMap::new(),
|
||||
currencies: vec![Currency::BTC],
|
||||
}];
|
||||
#[cfg(feature = "revolut")]
|
||||
if let Some(r) = &settings.revolut {
|
||||
ret.push(ApiPaymentInfo {
|
||||
name: ApiPaymentMethod::Revolut,
|
||||
metadata: HashMap::from([("pubkey".to_string(), r.public_key.to_string())]),
|
||||
currencies: vec![Currency::EUR, Currency::USD],
|
||||
})
|
||||
}
|
||||
|
||||
ApiData::ok(ret)
|
||||
}
|
||||
|
||||
/// Get payment status (for polling)
|
||||
#[openapi(tag = "Payment")]
|
||||
#[get("/api/v1/payment/<id>")]
|
||||
|
@ -6,25 +6,38 @@ use std::collections::HashMap;
|
||||
use std::sync::LazyLock;
|
||||
use tokio::sync::broadcast;
|
||||
|
||||
/// Messaging bridge for webhooks to other parts of the system (bitvora)
|
||||
/// Messaging bridge for webhooks to other parts of the system (bitvora/revolut)
|
||||
pub static WEBHOOK_BRIDGE: LazyLock<WebhookBridge> = LazyLock::new(WebhookBridge::new);
|
||||
|
||||
pub fn routes() -> Vec<Route> {
|
||||
if cfg!(feature = "bitvora") {
|
||||
routes![bitvora_webhook]
|
||||
} else {
|
||||
routes![]
|
||||
}
|
||||
let mut routes = vec![];
|
||||
|
||||
#[cfg(feature = "bitvora")]
|
||||
routes.append(&mut routes![bitvora_webhook]);
|
||||
|
||||
#[cfg(feature = "revolut")]
|
||||
routes.append(&mut routes![revolut_webhook]);
|
||||
|
||||
routes
|
||||
}
|
||||
|
||||
#[cfg(feature = "bitvora")]
|
||||
#[post("/api/v1/webhook/bitvora", data = "<req>")]
|
||||
async fn bitvora_webhook(req: WebhookMessage) -> Status {
|
||||
WEBHOOK_BRIDGE.send(req);
|
||||
Status::Ok
|
||||
}
|
||||
|
||||
#[cfg(feature = "revolut")]
|
||||
#[post("/api/v1/webhook/revolut", data = "<req>")]
|
||||
async fn revolut_webhook(req: WebhookMessage) -> Status {
|
||||
WEBHOOK_BRIDGE.send(req);
|
||||
Status::Ok
|
||||
}
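Both handlers simply forward the raw request onto WEBHOOK_BRIDGE; a minimal sketch of the consuming side is shown here (the subscribe method name is an assumption, only send appears in this diff):

// Hypothetical consumer of WEBHOOK_BRIDGE; listen() is an assumed
// broadcast-style subscriber and is not part of this diff.
async fn watch_webhooks() {
    let mut rx = WEBHOOK_BRIDGE.listen();
    while let Ok(msg) = rx.recv().await {
        if msg.endpoint.ends_with("/webhook/revolut") {
            // verify the signature carried in msg.headers, then parse msg.body
            log::info!("revolut webhook: {} bytes", msg.body.len());
        }
    }
}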
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct WebhookMessage {
|
||||
pub endpoint: String,
|
||||
pub body: Vec<u8>,
|
||||
pub headers: HashMap<String, String>,
|
||||
}
|
||||
@ -48,6 +61,7 @@ impl<'r> FromData<'r> for WebhookMessage {
|
||||
return rocket::data::Outcome::Error((Status::BadRequest, ()));
|
||||
};
|
||||
let msg = WebhookMessage {
|
||||
endpoint: req.uri().path().to_string(),
|
||||
headers: header,
|
||||
body: body.value.to_vec(),
|
||||
};
|
||||
|
@ -1,25 +1,26 @@
|
||||
use anyhow::Error;
|
||||
use chrono::Utc;
|
||||
use clap::Parser;
|
||||
use config::{Config, File};
|
||||
use lnvps::api;
|
||||
use lnvps::cors::CORS;
|
||||
use lnvps::data_migration::run_data_migrations;
|
||||
use lnvps::dvm::start_dvms;
|
||||
use lnvps::exchange::{DefaultRateCache, ExchangeRateService};
|
||||
use lnvps::invoice::InvoiceHandler;
|
||||
use lnvps::lightning::get_node;
|
||||
use lnvps::payments::listen_all_payments;
|
||||
use lnvps::settings::Settings;
|
||||
use lnvps::status::VmStateCache;
|
||||
use lnvps::worker::{WorkJob, Worker};
|
||||
use lnvps_db::{LNVpsDb, LNVpsDbMysql};
|
||||
use log::{error, LevelFilter};
|
||||
use log::error;
|
||||
use nostr::Keys;
|
||||
use nostr_sdk::Client;
|
||||
use rocket::http::Method;
|
||||
use rocket_okapi::swagger_ui::{make_swagger_ui, SwaggerUIConfig};
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use tokio::time::sleep;
|
||||
|
||||
#[derive(Parser)]
|
||||
#[clap(about, version, author)]
|
||||
@ -35,37 +36,9 @@ struct Args {
|
||||
|
||||
#[rocket::main]
|
||||
async fn main() -> Result<(), Error> {
|
||||
let log_level = std::env::var("RUST_LOG")
|
||||
.unwrap_or_else(|_| "info".to_string()) // Default to "info" if not set
|
||||
.to_lowercase();
|
||||
|
||||
let max_level = match log_level.as_str() {
|
||||
"trace" => LevelFilter::Trace,
|
||||
"debug" => LevelFilter::Debug,
|
||||
"info" => LevelFilter::Info,
|
||||
"warn" => LevelFilter::Warn,
|
||||
"error" => LevelFilter::Error,
|
||||
"off" => LevelFilter::Off,
|
||||
_ => LevelFilter::Info,
|
||||
};
|
||||
env_logger::init();
|
||||
|
||||
let args = Args::parse();
|
||||
fern::Dispatch::new()
|
||||
.level(max_level)
|
||||
.level_for("rocket", LevelFilter::Error)
|
||||
.chain(fern::log_file(
|
||||
args.log.unwrap_or(PathBuf::from(".")).join("main.log"),
|
||||
)?)
|
||||
.chain(std::io::stdout())
|
||||
.format(|out, message, record| {
|
||||
out.finish(format_args!(
|
||||
"[{}] [{}] {}",
|
||||
Utc::now().format("%Y-%m-%d %H:%M:%S"),
|
||||
record.level(),
|
||||
message
|
||||
))
|
||||
})
|
||||
.apply()?;
|
||||
|
||||
let settings: Settings = Config::builder()
|
||||
.add_source(File::from(
|
||||
@ -102,6 +75,9 @@ async fn main() -> Result<(), Error> {
|
||||
let provisioner = settings.get_provisioner(db.clone(), node.clone(), exchange.clone());
|
||||
provisioner.init().await?;
|
||||
|
||||
// run data migrations
|
||||
run_data_migrations(db.clone(), provisioner.clone(), &settings).await?;
|
||||
|
||||
let mut worker = Worker::new(
|
||||
db.clone(),
|
||||
provisioner.clone(),
|
||||
@ -117,15 +93,10 @@ async fn main() -> Result<(), Error> {
|
||||
}
|
||||
}
|
||||
});
|
||||
let mut handler = InvoiceHandler::new(node.clone(), db.clone(), sender.clone());
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
if let Err(e) = handler.listen().await {
|
||||
error!("invoice-error: {}", e);
|
||||
}
|
||||
sleep(Duration::from_secs(5)).await;
|
||||
}
|
||||
});
|
||||
|
||||
// setup payment handlers
|
||||
listen_all_payments(&settings, node.clone(), db.clone(), sender.clone())?;
|
||||
|
||||
// request work every 30s to check vm status
|
||||
let sender_clone = sender.clone();
|
||||
tokio::spawn(async move {
|
||||
@ -152,6 +123,15 @@ async fn main() -> Result<(), Error> {
|
||||
}
|
||||
});
|
||||
|
||||
#[cfg(feature = "nostr-dvm")]
|
||||
{
|
||||
let nostr_client = nostr_client.unwrap();
|
||||
start_dvms(nostr_client.clone(), provisioner.clone());
|
||||
}
|
||||
|
||||
// request for host info to be patched
|
||||
sender.send(WorkJob::PatchHosts)?;
|
||||
|
||||
let mut config = rocket::Config::default();
|
||||
let ip: SocketAddr = match &settings.listen {
|
||||
Some(i) => i.parse()?,
|
||||
@ -161,7 +141,6 @@ async fn main() -> Result<(), Error> {
|
||||
config.port = ip.port();
|
||||
|
||||
if let Err(e) = rocket::Rocket::custom(config)
|
||||
.attach(CORS)
|
||||
.manage(db.clone())
|
||||
.manage(provisioner.clone())
|
||||
.manage(status.clone())
|
||||
@ -176,6 +155,16 @@ async fn main() -> Result<(), Error> {
|
||||
..Default::default()
|
||||
}),
|
||||
)
|
||||
.attach(CORS)
|
||||
.mount(
|
||||
"/",
|
||||
vec![rocket::Route::ranked(
|
||||
isize::MAX,
|
||||
Method::Options,
|
||||
"/<catch_all_options_route..>",
|
||||
CORS,
|
||||
)],
|
||||
)
|
||||
.launch()
|
||||
.await
|
||||
{
|
||||
|
22
src/cors.rs
@ -1,8 +1,9 @@
|
||||
use rocket::fairing::{Fairing, Info, Kind};
|
||||
use rocket::http::{Header, Method, Status};
|
||||
use rocket::{Request, Response};
|
||||
use std::io::Cursor;
|
||||
use rocket::http::Header;
|
||||
use rocket::route::{Handler, Outcome};
|
||||
use rocket::{Data, Request, Response};
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct CORS;
|
||||
|
||||
#[rocket::async_trait]
|
||||
@ -14,7 +15,7 @@ impl Fairing for CORS {
|
||||
}
|
||||
}
|
||||
|
||||
async fn on_response<'r>(&self, req: &'r Request<'_>, response: &mut Response<'r>) {
|
||||
async fn on_response<'r>(&self, _req: &'r Request<'_>, response: &mut Response<'r>) {
|
||||
response.set_header(Header::new("Access-Control-Allow-Origin", "*"));
|
||||
response.set_header(Header::new(
|
||||
"Access-Control-Allow-Methods",
|
||||
@ -22,11 +23,12 @@ impl Fairing for CORS {
|
||||
));
|
||||
response.set_header(Header::new("Access-Control-Allow-Headers", "*"));
|
||||
response.set_header(Header::new("Access-Control-Allow-Credentials", "true"));
|
||||
|
||||
// force status 200 for options requests
|
||||
if req.method() == Method::Options {
|
||||
response.set_status(Status::Ok);
|
||||
response.set_sized_body(None, Cursor::new(""))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[rocket::async_trait]
|
||||
impl Handler for CORS {
|
||||
async fn handle<'r>(&self, _request: &'r Request<'_>, _data: Data<'r>) -> Outcome<'r> {
|
||||
Outcome::Success(Response::new())
|
||||
}
|
||||
}
|
||||
|
66
src/data_migration/dns.rs
Normal file
@ -0,0 +1,66 @@
|
||||
use crate::data_migration::DataMigration;
|
||||
use crate::dns::{BasicRecord, DnsServer};
|
||||
use crate::settings::Settings;
|
||||
use anyhow::Result;
|
||||
use lnvps_db::LNVpsDb;
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
|
||||
pub struct DnsDataMigration {
|
||||
db: Arc<dyn LNVpsDb>,
|
||||
dns: Arc<dyn DnsServer>,
|
||||
forward_zone_id: Option<String>,
|
||||
}
|
||||
|
||||
impl DnsDataMigration {
|
||||
pub fn new(db: Arc<dyn LNVpsDb>, settings: &Settings) -> Option<Self> {
|
||||
let dns = settings.get_dns().ok().flatten()?;
|
||||
Some(Self {
|
||||
db,
|
||||
dns,
|
||||
forward_zone_id: settings.dns.as_ref().map(|z| z.forward_zone_id.to_string()),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl DataMigration for DnsDataMigration {
|
||||
fn migrate(&self) -> Pin<Box<dyn Future<Output = Result<()>> + Send>> {
|
||||
let db = self.db.clone();
|
||||
let dns = self.dns.clone();
|
||||
let forward_zone_id = self.forward_zone_id.clone();
|
||||
Box::pin(async move {
|
||||
let zone_id = if let Some(z) = forward_zone_id {
|
||||
z
|
||||
} else {
|
||||
return Ok(());
|
||||
};
|
||||
let vms = db.list_vms().await?;
|
||||
|
||||
for vm in vms {
|
||||
let mut ips = db.list_vm_ip_assignments(vm.id).await?;
|
||||
for ip in &mut ips {
|
||||
let mut did_change = false;
|
||||
if ip.dns_forward.is_none() {
|
||||
let rec = BasicRecord::forward(ip)?;
|
||||
let r = dns.add_record(&zone_id, &rec).await?;
|
||||
ip.dns_forward = Some(r.name);
|
||||
ip.dns_forward_ref = r.id;
|
||||
did_change = true;
|
||||
}
|
||||
if ip.dns_reverse.is_none() {
|
||||
let rec = BasicRecord::reverse_to_fwd(ip)?;
|
||||
let r = dns.add_record(&zone_id, &rec).await?;
|
||||
ip.dns_reverse = Some(r.value);
|
||||
ip.dns_reverse_ref = r.id;
|
||||
did_change = true;
|
||||
}
|
||||
if did_change {
|
||||
db.update_vm_ip_assignment(ip).await?;
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
}
|
49
src/data_migration/ip6_init.rs
Normal file
@ -0,0 +1,49 @@
|
||||
use crate::data_migration::DataMigration;
|
||||
use crate::provisioner::{LNVpsProvisioner, NetworkProvisioner};
|
||||
use ipnetwork::IpNetwork;
|
||||
use lnvps_db::LNVpsDb;
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
use log::info;
|
||||
|
||||
pub struct Ip6InitDataMigration {
|
||||
db: Arc<dyn LNVpsDb>,
|
||||
provisioner: Arc<LNVpsProvisioner>,
|
||||
}
|
||||
|
||||
impl Ip6InitDataMigration {
|
||||
pub fn new(db: Arc<dyn LNVpsDb>, provisioner: Arc<LNVpsProvisioner>) -> Ip6InitDataMigration {
|
||||
Self { db, provisioner }
|
||||
}
|
||||
}
|
||||
|
||||
impl DataMigration for Ip6InitDataMigration {
|
||||
fn migrate(&self) -> Pin<Box<dyn Future<Output = anyhow::Result<()>> + Send>> {
|
||||
let db = self.db.clone();
|
||||
let provisioner = self.provisioner.clone();
|
||||
Box::pin(async move {
|
||||
let net = NetworkProvisioner::new(db.clone());
|
||||
let vms = db.list_vms().await?;
|
||||
for vm in vms {
|
||||
let host = db.get_host(vm.host_id).await?;
|
||||
let ips = db.list_vm_ip_assignments(vm.id).await?;
|
||||
// if no ipv6 address has been assigned yet, pick one
|
||||
if ips.iter().all(|i| {
|
||||
IpNetwork::from_str(&i.ip)
|
||||
.map(|i| i.is_ipv4())
|
||||
.unwrap_or(false)
|
||||
})
|
||||
{
|
||||
let ips_pick = net.pick_ip_for_region(host.region_id).await?;
|
||||
if let Some(mut v6) = ips_pick.ip6 {
|
||||
info!("Assigning ip {} to vm {}", v6.ip, vm.id);
|
||||
provisioner.assign_available_v6_to_vm(&vm, &mut v6).await?;
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
}
|
36
src/data_migration/mod.rs
Normal file
@ -0,0 +1,36 @@
|
||||
use crate::data_migration::dns::DnsDataMigration;
|
||||
use crate::settings::Settings;
|
||||
use anyhow::Result;
|
||||
use lnvps_db::LNVpsDb;
|
||||
use log::{error, info};
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
use crate::data_migration::ip6_init::Ip6InitDataMigration;
|
||||
use crate::provisioner::LNVpsProvisioner;
|
||||
|
||||
mod dns;
|
||||
mod ip6_init;
|
||||
|
||||
/// Basic data migration to run at startup
|
||||
pub trait DataMigration: Send + Sync {
|
||||
fn migrate(&self) -> Pin<Box<dyn Future<Output = Result<()>> + Send>>;
|
||||
}
|
||||
|
||||
pub async fn run_data_migrations(db: Arc<dyn LNVpsDb>, lnvps: Arc<LNVpsProvisioner>, settings: &Settings) -> Result<()> {
|
||||
let mut migrations: Vec<Box<dyn DataMigration>> = vec![];
|
||||
migrations.push(Box::new(Ip6InitDataMigration::new(db.clone(), lnvps.clone())));
|
||||
|
||||
if let Some(d) = DnsDataMigration::new(db.clone(), settings) {
|
||||
migrations.push(Box::new(d));
|
||||
}
|
||||
|
||||
info!("Running {} data migrations", migrations.len());
|
||||
for migration in migrations {
|
||||
if let Err(e) = migration.migrate().await {
|
||||
error!("Error running data migration: {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
@ -1,4 +1,4 @@
|
||||
use crate::dns::{BasicRecord, DnsServer, RecordType};
|
||||
use crate::dns::{BasicRecord, DnsServer};
|
||||
use crate::json_api::JsonApi;
|
||||
use anyhow::Context;
|
||||
use lnvps_db::async_trait;
|
||||
@ -7,17 +7,17 @@ use serde::{Deserialize, Serialize};
|
||||
|
||||
pub struct Cloudflare {
|
||||
api: JsonApi,
|
||||
reverse_zone_id: String,
|
||||
forward_zone_id: String,
|
||||
}
|
||||
|
||||
impl Cloudflare {
|
||||
pub fn new(token: &str, reverse_zone_id: &str, forward_zone_id: &str) -> Cloudflare {
|
||||
pub fn new(token: &str) -> Cloudflare {
|
||||
Self {
|
||||
api: JsonApi::token("https://api.cloudflare.com", &format!("Bearer {}", token))
|
||||
.unwrap(),
|
||||
reverse_zone_id: reverse_zone_id.to_owned(),
|
||||
forward_zone_id: forward_zone_id.to_owned(),
|
||||
api: JsonApi::token(
|
||||
"https://api.cloudflare.com",
|
||||
&format!("Bearer {}", token),
|
||||
false,
|
||||
)
|
||||
.unwrap(),
|
||||
}
|
||||
}
|
||||
|
||||
@ -41,11 +41,7 @@ impl Cloudflare {
|
||||
|
||||
#[async_trait]
|
||||
impl DnsServer for Cloudflare {
|
||||
async fn add_record(&self, record: &BasicRecord) -> anyhow::Result<BasicRecord> {
|
||||
let zone_id = match &record.kind {
|
||||
RecordType::PTR => &self.reverse_zone_id,
|
||||
_ => &self.forward_zone_id,
|
||||
};
|
||||
async fn add_record(&self, zone_id: &str, record: &BasicRecord) -> anyhow::Result<BasicRecord> {
|
||||
info!(
|
||||
"Adding record: [{}] {} => {}",
|
||||
record.kind, record.name, record.value
|
||||
@ -71,11 +67,7 @@ impl DnsServer for Cloudflare {
|
||||
})
|
||||
}
|
||||
|
||||
async fn delete_record(&self, record: &BasicRecord) -> anyhow::Result<()> {
|
||||
let zone_id = match &record.kind {
|
||||
RecordType::PTR => &self.reverse_zone_id,
|
||||
_ => &self.forward_zone_id,
|
||||
};
|
||||
async fn delete_record(&self, zone_id: &str, record: &BasicRecord) -> anyhow::Result<()> {
|
||||
let record_id = record.id.as_ref().context("record id missing")?;
|
||||
info!(
|
||||
"Deleting record: [{}] {} => {}",
|
||||
@ -98,11 +90,11 @@ impl DnsServer for Cloudflare {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn update_record(&self, record: &BasicRecord) -> anyhow::Result<BasicRecord> {
|
||||
let zone_id = match &record.kind {
|
||||
RecordType::PTR => &self.reverse_zone_id,
|
||||
_ => &self.forward_zone_id,
|
||||
};
|
||||
async fn update_record(
|
||||
&self,
|
||||
zone_id: &str,
|
||||
record: &BasicRecord,
|
||||
) -> anyhow::Result<BasicRecord> {
|
||||
info!(
|
||||
"Updating record: [{}] {} => {}",
|
||||
record.kind, record.name, record.value
|
||||
|
@ -8,17 +8,18 @@ use std::str::FromStr;
|
||||
mod cloudflare;
|
||||
#[cfg(feature = "cloudflare")]
|
||||
pub use cloudflare::*;
|
||||
use crate::provisioner::NetworkProvisioner;
|
||||
|
||||
#[async_trait]
|
||||
pub trait DnsServer: Send + Sync {
|
||||
/// Add PTR record to the reverse zone
|
||||
async fn add_record(&self, record: &BasicRecord) -> Result<BasicRecord>;
|
||||
async fn add_record(&self, zone_id: &str, record: &BasicRecord) -> Result<BasicRecord>;
|
||||
|
||||
/// Delete PTR record from the reverse zone
|
||||
async fn delete_record(&self, record: &BasicRecord) -> Result<()>;
|
||||
async fn delete_record(&self, zone_id: &str, record: &BasicRecord) -> Result<()>;
|
||||
|
||||
/// Update a record
|
||||
async fn update_record(&self, record: &BasicRecord) -> Result<BasicRecord>;
|
||||
async fn update_record(&self, zone_id: &str, record: &BasicRecord) -> Result<BasicRecord>;
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
@ -77,7 +78,7 @@ impl BasicRecord {
|
||||
Ok(Self {
|
||||
name: match addr {
|
||||
IpAddr::V4(i) => i.octets()[3].to_string(),
|
||||
IpAddr::V6(_) => bail!("IPv6 PTR not supported"),
|
||||
IpAddr::V6(i) => NetworkProvisioner::ipv6_to_ptr(&i)?,
|
||||
},
|
||||
value: fwd,
|
||||
id: ip.dns_reverse_ref.clone(),
|
||||
@ -98,7 +99,7 @@ impl BasicRecord {
|
||||
Ok(Self {
|
||||
name: match addr {
|
||||
IpAddr::V4(i) => i.octets()[3].to_string(),
|
||||
IpAddr::V6(_) => bail!("IPv6 PTR not supported"),
|
||||
IpAddr::V6(i) => NetworkProvisioner::ipv6_to_ptr(&i)?,
|
||||
},
|
||||
value: rev,
|
||||
id: ip.dns_reverse_ref.clone(),
|
||||
|
232
src/dvm/lnvps.rs
Normal file
@ -0,0 +1,232 @@
|
||||
use crate::dvm::{build_status_for_job, DVMHandler, DVMJobRequest};
|
||||
use crate::provisioner::LNVpsProvisioner;
|
||||
use crate::{GB, MB};
|
||||
use anyhow::Context;
|
||||
use lnvps_db::{
|
||||
DiskInterface, DiskType, LNVpsDb, OsDistribution, PaymentMethod, UserSshKey, VmCustomTemplate,
|
||||
};
|
||||
use nostr::prelude::DataVendingMachineStatus;
|
||||
use nostr::Tag;
|
||||
use nostr_sdk::Client;
|
||||
use ssh_key::PublicKey;
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
|
||||
pub struct LnvpsDvm {
|
||||
client: Client,
|
||||
provisioner: Arc<LNVpsProvisioner>,
|
||||
}
|
||||
|
||||
impl LnvpsDvm {
|
||||
pub fn new(provisioner: Arc<LNVpsProvisioner>, client: Client) -> LnvpsDvm {
|
||||
Self {
|
||||
provisioner,
|
||||
client,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl DVMHandler for LnvpsDvm {
|
||||
fn handle_request(
|
||||
&mut self,
|
||||
request: DVMJobRequest,
|
||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<()>> + Send>> {
|
||||
let provisioner = self.provisioner.clone();
|
||||
let client = self.client.clone();
|
||||
Box::pin(async move {
|
||||
let default_disk = "ssd".to_string();
|
||||
let default_interface = "pcie".to_string();
|
||||
let cpu = request.params.get("cpu").context("missing cpu parameter")?;
|
||||
let memory = request
|
||||
.params
|
||||
.get("memory")
|
||||
.context("missing memory parameter")?;
|
||||
let disk = request
|
||||
.params
|
||||
.get("disk")
|
||||
.context("missing disk parameter")?;
|
||||
let disk_type = request.params.get("disk_type").unwrap_or(&default_disk);
|
||||
let disk_interface = request
|
||||
.params
|
||||
.get("disk_interface")
|
||||
.unwrap_or(&default_interface);
|
||||
let ssh_key = request
|
||||
.params
|
||||
.get("ssh_key")
|
||||
.context("missing ssh_key parameter")?;
|
||||
let ssh_key_name = request.params.get("ssh_key_name");
|
||||
let os_image = request.params.get("os").context("missing os parameter")?;
|
||||
let os_version = request
|
||||
.params
|
||||
.get("os_version")
|
||||
.context("missing os_version parameter")?;
|
||||
let region = request.params.get("region");
|
||||
|
||||
let db = provisioner.get_db();
|
||||
let host_region = if let Some(r) = region {
|
||||
db.get_host_region_by_name(r).await?
|
||||
} else {
|
||||
db.list_host_region()
|
||||
.await?
|
||||
.into_iter()
|
||||
.next()
|
||||
.context("no host region")?
|
||||
};
|
||||
let pricing = db.list_custom_pricing(host_region.id).await?;
|
||||
|
||||
// we expect only 1 pricing per region
|
||||
let pricing = pricing
|
||||
.first()
|
||||
.context("no custom pricing found in region")?;
|
||||
|
||||
let template = VmCustomTemplate {
|
||||
id: 0,
|
||||
cpu: cpu.parse()?,
|
||||
memory: MB * memory.parse::<u64>()?,
|
||||
disk_size: GB * disk.parse::<u64>()?,
|
||||
disk_type: DiskType::from_str(disk_type)?,
|
||||
disk_interface: DiskInterface::from_str(disk_interface)?,
|
||||
pricing_id: pricing.id,
|
||||
};
|
||||
let uid = db.upsert_user(request.event.pubkey.as_bytes()).await?;
|
||||
|
||||
let pk: PublicKey = ssh_key.parse()?;
|
||||
let key_name = if let Some(n) = ssh_key_name {
|
||||
n.clone()
|
||||
} else {
|
||||
pk.comment().to_string()
|
||||
};
|
||||
let new_key = UserSshKey {
|
||||
name: key_name,
|
||||
user_id: uid,
|
||||
key_data: pk.to_openssh()?,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
// report as started if params are valid
|
||||
let processing =
|
||||
build_status_for_job(&request, DataVendingMachineStatus::Processing, None, None);
|
||||
client.send_event_builder(processing).await?;
|
||||
|
||||
let existing_keys = db.list_user_ssh_key(uid).await?;
|
||||
let ssh_key_id = if let Some(k) = existing_keys.iter().find(|k| {
|
||||
let ek: PublicKey = k.key_data.parse().unwrap();
|
||||
ek.eq(&pk)
|
||||
}) {
|
||||
k.id
|
||||
} else {
|
||||
db.insert_user_ssh_key(&new_key).await?
|
||||
};
|
||||
|
||||
let image = OsDistribution::from_str(os_image)?;
|
||||
let image = db
|
||||
.list_os_image()
|
||||
.await?
|
||||
.into_iter()
|
||||
.find(|i| i.distribution == image && i.version == *os_version)
|
||||
.context("no os image found")?;
|
||||
|
||||
let vm = provisioner
|
||||
.provision_custom(uid, template, image.id, ssh_key_id, None)
|
||||
.await?;
|
||||
let invoice = provisioner.renew(vm.id, PaymentMethod::Lightning).await?;
|
||||
|
||||
let mut payment = build_status_for_job(
|
||||
&request,
|
||||
DataVendingMachineStatus::PaymentRequired,
|
||||
None,
|
||||
None,
|
||||
);
|
||||
payment = payment.tag(Tag::parse([
|
||||
"amount",
|
||||
invoice.amount.to_string().as_str(),
|
||||
&invoice.external_data,
|
||||
])?);
|
||||
client.send_event_builder(payment).await?;
|
||||
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::dvm::parse_job_request;
|
||||
use crate::exchange::{ExchangeRateService, Ticker};
|
||||
use crate::mocks::{MockDb, MockExchangeRate, MockNode};
|
||||
use crate::settings::mock_settings;
|
||||
use lnvps_db::{VmCustomPricing, VmCustomPricingDisk};
|
||||
use nostr::{EventBuilder, Keys, Kind};
|
||||
|
||||
#[tokio::test]
|
||||
#[ignore]
|
||||
async fn test_dvm() -> anyhow::Result<()> {
|
||||
let db = Arc::new(MockDb::default());
|
||||
let node = Arc::new(MockNode::new());
|
||||
let exch = Arc::new(MockExchangeRate::new());
|
||||
exch.set_rate(Ticker::btc_rate("EUR")?, 69_420.0).await;
|
||||
|
||||
{
|
||||
let mut cp = db.custom_pricing.lock().await;
|
||||
cp.insert(
|
||||
1,
|
||||
VmCustomPricing {
|
||||
id: 1,
|
||||
name: "mock".to_string(),
|
||||
enabled: true,
|
||||
created: Default::default(),
|
||||
expires: None,
|
||||
region_id: 1,
|
||||
currency: "EUR".to_string(),
|
||||
cpu_cost: 1.5,
|
||||
memory_cost: 0.5,
|
||||
ip4_cost: 1.5,
|
||||
ip6_cost: 0.05,
|
||||
},
|
||||
);
|
||||
let mut cpd = db.custom_pricing_disk.lock().await;
|
||||
cpd.insert(
|
||||
1,
|
||||
VmCustomPricingDisk {
|
||||
id: 1,
|
||||
pricing_id: 1,
|
||||
kind: DiskType::SSD,
|
||||
interface: DiskInterface::PCIe,
|
||||
cost: 0.05,
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
let settings = mock_settings();
|
||||
let provisioner = Arc::new(LNVpsProvisioner::new(
|
||||
settings,
|
||||
db.clone(),
|
||||
node.clone(),
|
||||
exch.clone(),
|
||||
));
|
||||
let keys = Keys::generate();
|
||||
let empty_client = Client::new(keys.clone());
|
||||
empty_client.add_relay("wss://nos.lol").await?;
|
||||
empty_client.connect().await;
|
||||
|
||||
let mut dvm = LnvpsDvm::new(provisioner.clone(), empty_client.clone());
|
||||
|
||||
let ev = EventBuilder::new(Kind::from_u16(5999), "")
|
||||
.tags([
|
||||
Tag::parse(["param", "cpu", "1"])?,
|
||||
Tag::parse(["param", "memory", "1024"])?,
|
||||
Tag::parse(["param", "disk", "50"])?,
|
||||
Tag::parse(["param", "disk_type", "ssd"])?,
|
||||
Tag::parse(["param", "ssh_key", "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGUSrwzZfbjqY81RRC7eg3zRvg0D53HOhjbG6h0SY3f3"])?,
|
||||
])
|
||||
.sign(&keys)
|
||||
.await?;
|
||||
let req = parse_job_request(&ev)?;
|
||||
dvm.handle_request(req).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
257
src/dvm/mod.rs
Normal file
@ -0,0 +1,257 @@
|
||||
mod lnvps;
|
||||
|
||||
use crate::dvm::lnvps::LnvpsDvm;
|
||||
use crate::provisioner::LNVpsProvisioner;
|
||||
use anyhow::Result;
|
||||
use futures::FutureExt;
|
||||
use log::{error, info, warn};
|
||||
use nostr::Filter;
|
||||
use nostr_sdk::prelude::DataVendingMachineStatus;
|
||||
use nostr_sdk::{
|
||||
Client, Event, EventBuilder, EventId, Kind, RelayPoolNotification, Tag, Timestamp, Url,
|
||||
};
|
||||
use std::collections::HashMap;
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use tokio::task::JoinHandle;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct DVMJobRequest {
|
||||
/// The source event
|
||||
pub event: Event,
|
||||
/// Input data for the job (zero or more inputs)
|
||||
pub inputs: Vec<DVMInput>,
|
||||
/// Expected output format. Different job request kind defines this more precisely.
|
||||
pub output_type: Option<String>,
|
||||
/// Optional parameters for the job as key (first argument)/value (second argument).
|
||||
/// Different job request kind defines this more precisely. (e.g. [ "param", "lang", "es" ])
|
||||
pub params: HashMap<String, String>,
|
||||
/// Customer MAY specify a maximum amount (in millisats) they are willing to pay
|
||||
pub bid: Option<u64>,
|
||||
/// List of relays where Service Providers SHOULD publish responses to
|
||||
pub relays: Vec<String>,
|
||||
}
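For reference, the tags consumed by parse_job_request below map to these fields (values are placeholders):

// ["i", <data>, "text", <relay?>, <marker?>]  -> inputs (types: url/event/job/text)
// ["param", "cpu", "2"]                       -> params => {"cpu": "2"}
// ["output", "text/plain"]                    -> output_type
// ["bid", "1000000"]                          -> bid (millisats)
// ["relay", "wss://relay.example.com"]        -> relays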
|
||||
|
||||
#[derive(Clone)]
|
||||
pub enum DVMInput {
|
||||
Url {
|
||||
url: Url,
|
||||
relay: Option<String>,
|
||||
marker: Option<String>,
|
||||
},
|
||||
Event {
|
||||
event: EventId,
|
||||
relay: Option<String>,
|
||||
marker: Option<String>,
|
||||
},
|
||||
Job {
|
||||
event: EventId,
|
||||
relay: Option<String>,
|
||||
marker: Option<String>,
|
||||
},
|
||||
Text {
|
||||
data: String,
|
||||
relay: Option<String>,
|
||||
marker: Option<String>,
|
||||
},
|
||||
}
|
||||
|
||||
/// Basic DVM handler that accepts a job request
|
||||
pub trait DVMHandler: Send + Sync {
|
||||
fn handle_request(
|
||||
&mut self,
|
||||
request: DVMJobRequest,
|
||||
) -> Pin<Box<dyn Future<Output = Result<()>> + Send>>;
|
||||
}
|
||||
|
||||
pub(crate) fn build_status_for_job(
|
||||
req: &DVMJobRequest,
|
||||
status: DataVendingMachineStatus,
|
||||
extra: Option<&str>,
|
||||
content: Option<&str>,
|
||||
) -> EventBuilder {
|
||||
EventBuilder::new(Kind::JobFeedback, content.unwrap_or("")).tags([
|
||||
Tag::parse(["status", status.to_string().as_str(), extra.unwrap_or("")]).unwrap(),
|
||||
Tag::expiration(Timestamp::now() + Duration::from_secs(30)),
|
||||
Tag::event(req.event.id),
|
||||
Tag::public_key(req.event.pubkey),
|
||||
])
|
||||
}
|
||||
|
||||
/// Start listening for jobs with a specific handler
|
||||
fn listen_for_jobs(
|
||||
client: Client,
|
||||
kind: Kind,
|
||||
mut dvm: Box<dyn DVMHandler>,
|
||||
) -> Pin<Box<dyn Future<Output = Result<()>> + Send>> {
|
||||
Box::pin(async move {
|
||||
let sub = client
|
||||
.subscribe(Filter::new().kind(kind).since(Timestamp::now()), None)
|
||||
.await?;
|
||||
|
||||
info!("Listening for jobs: {}", kind);
|
||||
let mut rx = client.notifications();
|
||||
while let Ok(e) = rx.recv().await {
|
||||
match e {
|
||||
RelayPoolNotification::Event { event, .. } if event.kind == kind => {
|
||||
match parse_job_request(&event) {
|
||||
Ok(req) => {
|
||||
if let Err(e) = dvm.handle_request(req.clone()).await {
|
||||
error!("Error handling job request: {}", e);
|
||||
|
||||
let data = build_status_for_job(
|
||||
&req,
|
||||
DataVendingMachineStatus::Error,
|
||||
Some(e.to_string().as_str()),
|
||||
None,
|
||||
);
|
||||
client.send_event_builder(data).await?;
|
||||
}
|
||||
}
|
||||
Err(e) => warn!("Invalid job request: {:?}", e),
|
||||
}
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
|
||||
client.unsubscribe(&sub).await;
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
|
||||
fn parse_job_request(event: &Event) -> Result<DVMJobRequest> {
|
||||
let mut inputs = vec![];
|
||||
for i_tag in event
|
||||
.tags
|
||||
.iter()
|
||||
.filter(|t| t.kind().as_str() == "i")
|
||||
.map(|t| t.as_slice())
|
||||
{
|
||||
let input = match i_tag[2].as_str() {
|
||||
"url" => DVMInput::Url {
|
||||
url: if let Ok(u) = i_tag[1].parse() {
|
||||
u
|
||||
} else {
|
||||
warn!("Invalid url: {}", i_tag[1]);
|
||||
continue;
|
||||
},
|
||||
relay: if i_tag.len() > 3 {
|
||||
Some(i_tag[3].to_string())
|
||||
} else {
|
||||
None
|
||||
},
|
||||
marker: if i_tag.len() > 4 {
|
||||
Some(i_tag[4].to_string())
|
||||
} else {
|
||||
None
|
||||
},
|
||||
},
|
||||
"event" => DVMInput::Event {
|
||||
event: if let Ok(t) = EventId::parse(&i_tag[1]) {
|
||||
t
|
||||
} else {
|
||||
warn!("Invalid event id: {}", i_tag[1]);
|
||||
continue;
|
||||
},
|
||||
relay: if i_tag.len() > 3 {
|
||||
Some(i_tag[3].to_string())
|
||||
} else {
|
||||
None
|
||||
},
|
||||
marker: if i_tag.len() > 4 {
|
||||
Some(i_tag[4].to_string())
|
||||
} else {
|
||||
None
|
||||
},
|
||||
},
|
||||
"job" => DVMInput::Job {
|
||||
event: if let Ok(t) = EventId::parse(&i_tag[1]) {
|
||||
t
|
||||
} else {
|
||||
warn!("Invalid event id in job: {}", i_tag[1]);
|
||||
continue;
|
||||
},
|
||||
relay: if i_tag.len() > 3 {
|
||||
Some(i_tag[3].to_string())
|
||||
} else {
|
||||
None
|
||||
},
|
||||
marker: if i_tag.len() > 4 {
|
||||
Some(i_tag[4].to_string())
|
||||
} else {
|
||||
None
|
||||
},
|
||||
},
|
||||
"text" => DVMInput::Text {
|
||||
data: i_tag[1].to_string(),
|
||||
relay: if i_tag.len() > 3 {
|
||||
Some(i_tag[3].to_string())
|
||||
} else {
|
||||
None
|
||||
},
|
||||
marker: if i_tag.len() > 4 {
|
||||
Some(i_tag[4].to_string())
|
||||
} else {
|
||||
None
|
||||
},
|
||||
},
|
||||
t => {
|
||||
warn!("unknown tag: {}", t);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
inputs.push(input);
|
||||
}
|
||||
|
||||
let params: HashMap<String, String> = event
|
||||
.tags
|
||||
.iter()
|
||||
.filter(|t| t.kind().as_str() == "param")
|
||||
.filter_map(|p| {
|
||||
let p = p.as_slice();
|
||||
if p.len() == 3 {
|
||||
Some((p[1].clone(), p[2].clone()))
|
||||
} else {
|
||||
warn!("Invalid param: {}", p.join(", "));
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
Ok(DVMJobRequest {
|
||||
event: event.clone(),
|
||||
inputs,
|
||||
output_type: event
|
||||
.tags
|
||||
.iter()
|
||||
.find(|t| t.kind().as_str() == "output")
|
||||
.and_then(|t| t.content())
|
||||
.map(|s| s.to_string()),
|
||||
params,
|
||||
bid: event
|
||||
.tags
|
||||
.iter()
|
||||
.find(|t| t.kind().as_str() == "bid")
|
||||
.and_then(|t| t.content())
|
||||
.and_then(|t| t.parse::<u64>().ok()),
|
||||
relays: event
|
||||
.tags
|
||||
.iter()
|
||||
.filter(|t| t.kind().as_str() == "relay")
|
||||
.map(|c| &c.as_slice()[1..])
|
||||
.flatten()
|
||||
.map(|s| s.to_string())
|
||||
.collect(),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn start_dvms(client: Client, provisioner: Arc<LNVpsProvisioner>) -> JoinHandle<()> {
|
||||
tokio::spawn(async move {
|
||||
let dvm = LnvpsDvm::new(provisioner, client.clone());
|
||||
if let Err(e) = listen_for_jobs(client, Kind::from_u16(5999), Box::new(dvm)).await {
|
||||
error!("Error listening jobs: {}", e);
|
||||
}
|
||||
})
|
||||
}
|
@ -1,10 +1,10 @@
|
||||
use anyhow::{anyhow, ensure, Context, Error, Result};
|
||||
use anyhow::{anyhow, ensure, Result};
|
||||
use lnvps_db::async_trait;
|
||||
use log::info;
|
||||
use rocket::serde::Deserialize;
|
||||
use schemars::JsonSchema;
|
||||
use serde::Serialize;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::collections::HashMap;
|
||||
use std::fmt::{Display, Formatter};
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
@ -60,7 +60,37 @@ impl Display for Ticker {
|
||||
pub struct TickerRate(pub Ticker, pub f32);
|
||||
|
||||
#[derive(Clone, Copy, Debug, PartialEq)]
|
||||
pub struct CurrencyAmount(pub Currency, pub f32);
|
||||
pub struct CurrencyAmount(pub Currency, u64);
|
||||
|
||||
impl CurrencyAmount {
|
||||
const MILLI_SATS: f64 = 1.0e11;
|
||||
|
||||
pub fn from_u64(currency: Currency, amount: u64) -> Self {
|
||||
CurrencyAmount(currency, amount)
|
||||
}
|
||||
pub fn from_f32(currency: Currency, amount: f32) -> Self {
|
||||
CurrencyAmount(
|
||||
currency,
|
||||
match currency {
|
||||
Currency::EUR => (amount * 100.0) as u64, // cents
|
||||
Currency::BTC => (amount as f64 * Self::MILLI_SATS) as u64, // milli-sats
|
||||
Currency::USD => (amount * 100.0) as u64, // cents
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
pub fn value(&self) -> u64 {
|
||||
self.1
|
||||
}
|
||||
|
||||
pub fn value_f32(&self) -> f32 {
|
||||
match self.0 {
|
||||
Currency::EUR => self.1 as f32 / 100.0,
|
||||
Currency::BTC => (self.1 as f64 / Self::MILLI_SATS) as f32,
|
||||
Currency::USD => self.1 as f32 / 100.0,
|
||||
}
|
||||
}
|
||||
}
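To make the fixed-point units above concrete, a short illustrative round-trip (values chosen for this example):

// EUR/USD are stored as cents, BTC as milli-satoshis (1 BTC = 1e11 msat)
let eur = CurrencyAmount::from_f32(Currency::EUR, 5.50);
assert_eq!(eur.value(), 550); // cents
let btc = CurrencyAmount::from_f32(Currency::BTC, 0.001);
assert_eq!(btc.value(), 100_000_000); // milli-sats
// btc.value_f32() recovers ~0.001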
|
||||
|
||||
impl TickerRate {
|
||||
pub fn can_convert(&self, currency: Currency) -> bool {
|
||||
@ -74,9 +104,15 @@ impl TickerRate {
|
||||
"Cant convert, currency doesnt match"
|
||||
);
|
||||
if source.0 == self.0 .0 {
|
||||
Ok(CurrencyAmount(self.0 .1, source.1 * self.1))
|
||||
Ok(CurrencyAmount::from_f32(
|
||||
self.0 .1,
|
||||
source.value_f32() * self.1,
|
||||
))
|
||||
} else {
|
||||
Ok(CurrencyAmount(self.0 .0, source.1 / self.1))
|
||||
Ok(CurrencyAmount::from_f32(
|
||||
self.0 .0,
|
||||
source.value_f32() / self.1,
|
||||
))
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -99,7 +135,7 @@ pub fn alt_prices(rates: &Vec<TickerRate>, source: CurrencyAmount) -> Vec<Curren
|
||||
let mut ret2 = vec![];
|
||||
for y in rates.iter() {
|
||||
for x in ret.iter() {
|
||||
if let Ok(r1) = y.convert(x.clone()) {
|
||||
if let Ok(r1) = y.convert(*x) {
|
||||
if r1.0 != source.0 {
|
||||
ret2.push(r1);
|
||||
}
|
||||
@ -171,12 +207,14 @@ mod tests {
|
||||
let f = TickerRate(ticker, RATE);
|
||||
|
||||
assert_eq!(
|
||||
f.convert(CurrencyAmount(Currency::EUR, 5.0)).unwrap(),
|
||||
CurrencyAmount(Currency::BTC, 5.0 / RATE)
|
||||
f.convert(CurrencyAmount::from_f32(Currency::EUR, 5.0))
|
||||
.unwrap(),
|
||||
CurrencyAmount::from_f32(Currency::BTC, 5.0 / RATE)
|
||||
);
|
||||
assert_eq!(
|
||||
f.convert(CurrencyAmount(Currency::BTC, 0.001)).unwrap(),
|
||||
CurrencyAmount(Currency::EUR, RATE * 0.001)
|
||||
f.convert(CurrencyAmount::from_f32(Currency::BTC, 0.001))
|
||||
.unwrap(),
|
||||
CurrencyAmount::from_f32(Currency::EUR, RATE * 0.001)
|
||||
);
|
||||
assert!(!f.can_convert(Currency::USD));
|
||||
assert!(f.can_convert(Currency::EUR));
|
||||
|
25
src/fiat/mod.rs
Normal file
@ -0,0 +1,25 @@
|
||||
/// Fiat payment integrations
|
||||
use crate::exchange::CurrencyAmount;
|
||||
use anyhow::Result;
|
||||
use rocket::serde::{Deserialize, Serialize};
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
|
||||
#[cfg(feature = "revolut")]
|
||||
mod revolut;
|
||||
#[cfg(feature = "revolut")]
|
||||
pub use revolut::*;
|
||||
|
||||
pub trait FiatPaymentService: Send + Sync {
|
||||
fn create_order(
|
||||
&self,
|
||||
description: &str,
|
||||
amount: CurrencyAmount,
|
||||
) -> Pin<Box<dyn Future<Output = Result<FiatPaymentInfo>> + Send>>;
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct FiatPaymentInfo {
|
||||
pub external_id: String,
|
||||
pub raw_data: String,
|
||||
}
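A minimal sketch of driving the trait through the Revolut implementation below (the config values, order description and helper name are placeholders, not part of the diff):

// Assumes the revolut feature; RevolutConfig is taken from settings.
use crate::exchange::{Currency, CurrencyAmount};
use crate::fiat::{FiatPaymentService, RevolutApi};

async fn demo_order(cfg: crate::settings::RevolutConfig) -> anyhow::Result<()> {
    let api: Box<dyn FiatPaymentService> = Box::new(RevolutApi::new(cfg)?);
    let amount = CurrencyAmount::from_f32(Currency::EUR, 9.99);
    let info = api.create_order("vm-renewal (example)", amount).await?;
    println!("created fiat order {}", info.external_id);
    Ok(())
}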
|
290
src/fiat/revolut.rs
Normal file
@ -0,0 +1,290 @@
|
||||
use crate::exchange::{Currency, CurrencyAmount};
|
||||
use crate::fiat::{FiatPaymentInfo, FiatPaymentService};
|
||||
use crate::json_api::{JsonApi, TokenGen};
|
||||
use crate::settings::RevolutConfig;
|
||||
use anyhow::{bail, Result};
|
||||
use chrono::{DateTime, Utc};
|
||||
use nostr::Url;
|
||||
use reqwest::header::AUTHORIZATION;
|
||||
use reqwest::{Client, Method, RequestBuilder};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct RevolutApi {
|
||||
api: JsonApi,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
struct RevolutTokenGen {
|
||||
pub token: String,
|
||||
pub api_version: String,
|
||||
}
|
||||
|
||||
impl TokenGen for RevolutTokenGen {
|
||||
fn generate_token(
|
||||
&self,
|
||||
_method: Method,
|
||||
_url: &Url,
|
||||
_body: Option<&str>,
|
||||
req: RequestBuilder,
|
||||
) -> Result<RequestBuilder> {
|
||||
Ok(req
|
||||
.header(AUTHORIZATION, format!("Bearer {}", &self.token))
|
||||
.header("Revolut-Api-Version", &self.api_version))
|
||||
}
|
||||
}
|
||||
|
||||
impl RevolutApi {
|
||||
pub fn new(config: RevolutConfig) -> Result<Self> {
|
||||
let gen = RevolutTokenGen {
|
||||
token: config.token,
|
||||
api_version: config.api_version,
|
||||
};
|
||||
const DEFAULT_URL: &str = "https://merchant.revolut.com";
|
||||
|
||||
Ok(Self {
|
||||
api: JsonApi::token_gen(&config.url.unwrap_or(DEFAULT_URL.to_string()), false, gen)?,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn list_webhooks(&self) -> Result<Vec<RevolutWebhook>> {
|
||||
self.api.get("/api/1.0/webhooks").await
|
||||
}
|
||||
|
||||
pub async fn delete_webhook(&self, webhook_id: &str) -> Result<()> {
|
||||
self.api
|
||||
.req_status(
|
||||
Method::DELETE,
|
||||
&format!("/api/1.0/webhooks/{}", webhook_id),
|
||||
(),
|
||||
)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn create_webhook(
|
||||
&self,
|
||||
url: &str,
|
||||
events: Vec<RevolutWebhookEvent>,
|
||||
) -> Result<RevolutWebhook> {
|
||||
self.api
|
||||
.post(
|
||||
"/api/1.0/webhooks",
|
||||
CreateWebhookRequest {
|
||||
url: url.to_string(),
|
||||
events,
|
||||
},
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn create_order(
|
||||
&self,
|
||||
amount: CurrencyAmount,
|
||||
description: Option<String>,
|
||||
) -> Result<RevolutOrder> {
|
||||
self.api
|
||||
.post(
|
||||
"/api/orders",
|
||||
CreateOrderRequest {
|
||||
currency: amount.0.to_string(),
|
||||
amount: match amount.0 {
|
||||
Currency::BTC => bail!("Bitcoin amount not allowed for fiat payments"),
|
||||
Currency::EUR => amount.value(),
|
||||
Currency::USD => amount.value(),
|
||||
},
|
||||
description,
|
||||
},
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn get_order(&self, order_id: &str) -> Result<RevolutOrder> {
|
||||
self.api.get(&format!("/api/orders/{}", order_id)).await
|
||||
}
|
||||
}
|
||||
|
||||
impl FiatPaymentService for RevolutApi {
|
||||
fn create_order(
|
||||
&self,
|
||||
description: &str,
|
||||
amount: CurrencyAmount,
|
||||
) -> Pin<Box<dyn Future<Output = Result<FiatPaymentInfo>> + Send>> {
|
||||
let s = self.clone();
|
||||
let desc = description.to_string();
|
||||
Box::pin(async move {
|
||||
let rsp = s.create_order(amount, Some(desc)).await?;
|
||||
Ok(FiatPaymentInfo {
|
||||
raw_data: serde_json::to_string(&rsp)?,
|
||||
external_id: rsp.id,
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Serialize)]
|
||||
pub struct CreateOrderRequest {
|
||||
pub amount: u64,
|
||||
pub currency: String,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub description: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Deserialize, Serialize)]
|
||||
pub struct RevolutOrder {
|
||||
pub id: String,
|
||||
pub token: String,
|
||||
pub state: RevolutOrderState,
|
||||
pub created_at: DateTime<Utc>,
|
||||
pub updated_at: DateTime<Utc>,
|
||||
pub description: Option<String>,
|
||||
pub amount: u64,
|
||||
pub currency: String,
|
||||
pub outstanding_amount: u64,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub checkout_url: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub payments: Option<Vec<RevolutOrderPayment>>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Deserialize, Serialize)]
|
||||
pub struct RevolutOrderPayment {
|
||||
pub id: String,
|
||||
pub state: RevolutPaymentState,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub decline_reason: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub bank_message: Option<String>,
|
||||
pub created_at: DateTime<Utc>,
|
||||
pub updated_at: DateTime<Utc>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub token: Option<String>,
|
||||
pub amount: u64,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub currency: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub settled_amount: Option<u64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub settled_currency: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub payment_method: Option<RevolutPaymentMethod>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub billing_address: Option<RevolutBillingAddress>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub risk_level: Option<RevolutRiskLevel>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Deserialize, Serialize)]
|
||||
pub struct RevolutPaymentMethod {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub id: Option<String>,
|
||||
#[serde(rename = "type")]
|
||||
pub kind: RevolutPaymentMethodType,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub card_brand: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub funding: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub card_country_code: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub card_bin: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub card_last_four: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub card_expiry: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub cardholder_name: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum RevolutPaymentMethodType {
|
||||
ApplePay,
|
||||
Card,
|
||||
GooglePay,
|
||||
RevolutPayCard,
|
||||
RevolutPayAccount,
|
||||
}
|
||||
|
||||
#[derive(Clone, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum RevolutRiskLevel {
|
||||
High,
|
||||
Low,
|
||||
}
|
||||
|
||||
#[derive(Clone, Deserialize, Serialize)]
|
||||
pub struct RevolutBillingAddress {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub street_line_1: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub street_line_2: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub region: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub city: Option<String>,
|
||||
|
||||
pub country_code: String,
|
||||
pub postcode: String,
|
||||
}
|
||||
|
||||
#[derive(Clone, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum RevolutOrderState {
|
||||
Pending,
|
||||
Processing,
|
||||
Authorised,
|
||||
Completed,
|
||||
Cancelled,
|
||||
Failed,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum RevolutPaymentState {
|
||||
Pending,
|
||||
AuthenticationChallenge,
|
||||
AuthenticationVerified,
|
||||
AuthorisationStarted,
|
||||
AuthorisationPassed,
|
||||
Authorised,
|
||||
CaptureStarted,
|
||||
Captured,
|
||||
RefundValidated,
|
||||
RefundStarted,
|
||||
CancellationStarted,
|
||||
Declining,
|
||||
Completing,
|
||||
Cancelling,
|
||||
Failing,
|
||||
Completed,
|
||||
Declined,
|
||||
SoftDeclined,
|
||||
Cancelled,
|
||||
Failed,
|
||||
}
|
||||
|
||||
#[derive(Clone, Deserialize, Serialize)]
|
||||
pub struct RevolutWebhook {
|
||||
pub id: String,
|
||||
pub url: String,
|
||||
pub events: Vec<RevolutWebhookEvent>,
|
||||
pub signing_secret: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
|
||||
pub enum RevolutWebhookEvent {
|
||||
OrderAuthorised,
|
||||
OrderCompleted,
|
||||
OrderCancelled,
|
||||
}
|
||||
|
||||
#[derive(Clone, Deserialize, Serialize)]
|
||||
pub struct CreateWebhookRequest {
|
||||
pub url: String,
|
||||
pub events: Vec<RevolutWebhookEvent>,
|
||||
}
|
@ -30,11 +30,15 @@ impl VmHostClient for LibVirt {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn reinstall_vm(&self, cfg: &FullVmInfo) -> anyhow::Result<()> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_vm_state(&self, vm: &Vm) -> anyhow::Result<VmState> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn configure_vm(&self, vm: &Vm) -> anyhow::Result<()> {
|
||||
async fn configure_vm(&self, vm: &FullVmInfo) -> anyhow::Result<()> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
|
@ -10,15 +10,23 @@ use schemars::JsonSchema;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashSet;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::mpsc::{Receiver, Sender};
|
||||
|
||||
#[cfg(feature = "libvirt")]
|
||||
mod libvirt;
|
||||
//#[cfg(feature = "libvirt")]
|
||||
//mod libvirt;
|
||||
#[cfg(feature = "proxmox")]
|
||||
mod proxmox;
|
||||
|
||||
pub struct TerminalStream {
|
||||
pub rx: Receiver<Vec<u8>>,
|
||||
pub tx: Sender<Vec<u8>>,
|
||||
}
|
||||
|
||||
/// Generic type for creating VM's
|
||||
#[async_trait]
|
||||
pub trait VmHostClient: Send + Sync {
|
||||
async fn get_info(&self) -> Result<VmHostInfo>;
|
||||
|
||||
/// Download OS image to the host
|
||||
async fn download_os_image(&self, image: &VmOsImage) -> Result<()>;
|
||||
|
||||
@ -37,6 +45,9 @@ pub trait VmHostClient: Send + Sync {
|
||||
/// Spawn a VM
|
||||
async fn create_vm(&self, cfg: &FullVmInfo) -> Result<()>;
|
||||
|
||||
/// Re-install a vm OS
|
||||
async fn reinstall_vm(&self, cfg: &FullVmInfo) -> Result<()>;
|
||||
|
||||
/// Get the running status of a VM
|
||||
async fn get_vm_state(&self, vm: &Vm) -> Result<VmState>;
|
||||
|
||||
@ -49,6 +60,9 @@ pub trait VmHostClient: Send + Sync {
|
||||
vm: &Vm,
|
||||
series: TimeSeries,
|
||||
) -> Result<Vec<TimeSeriesData>>;
|
||||
|
||||
/// Connect to terminal serial port
|
||||
async fn connect_terminal(&self, vm: &Vm) -> Result<TerminalStream>;
|
||||
}
|
||||
|
||||
pub fn get_host_client(host: &VmHost, cfg: &ProvisionerConfig) -> Result<Arc<dyn VmHostClient>> {
|
||||
@ -84,6 +98,8 @@ pub fn get_host_client(host: &VmHost, cfg: &ProvisionerConfig) -> Result<Arc<dyn
|
||||
pub struct FullVmInfo {
|
||||
/// Instance to create
|
||||
pub vm: Vm,
|
||||
/// Host where the VM will be spawned
|
||||
pub host: VmHost,
|
||||
/// Disk where this VM will be saved on the host
|
||||
pub disk: VmHostDisk,
|
||||
/// VM template resources
|
||||
@ -103,6 +119,7 @@ pub struct FullVmInfo {
|
||||
impl FullVmInfo {
|
||||
pub async fn load(vm_id: u64, db: Arc<dyn LNVpsDb>) -> Result<Self> {
|
||||
let vm = db.get_vm(vm_id).await?;
|
||||
let host = db.get_host(vm.host_id).await?;
|
||||
let image = db.get_os_image(vm.image_id).await?;
|
||||
let disk = db.get_host_disk(vm.disk_id).await?;
|
||||
let ssh_key = db.get_user_ssh_key(vm.ssh_key_id).await?;
|
||||
@ -129,6 +146,7 @@ impl FullVmInfo {
|
||||
// create VM
|
||||
Ok(FullVmInfo {
|
||||
vm,
|
||||
host,
|
||||
template,
|
||||
custom_template,
|
||||
image,
|
||||
@ -186,3 +204,17 @@ pub enum TimeSeries {
|
||||
Monthly,
|
||||
Yearly,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct VmHostInfo {
|
||||
pub cpu: u16,
|
||||
pub memory: u64,
|
||||
pub disks: Vec<VmHostDiskInfo>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct VmHostDiskInfo {
|
||||
pub name: String,
|
||||
pub size: u64,
|
||||
pub used: u64,
|
||||
}
|
||||
|
@ -1,12 +1,16 @@
|
||||
use crate::host::{FullVmInfo, TimeSeries, TimeSeriesData, VmHostClient};
|
||||
use crate::host::{
|
||||
FullVmInfo, TerminalStream, TimeSeries, TimeSeriesData, VmHostClient, VmHostDiskInfo,
|
||||
VmHostInfo,
|
||||
};
|
||||
use crate::json_api::JsonApi;
|
||||
use crate::settings::{QemuConfig, SshConfig};
|
||||
use crate::ssh_client::SshClient;
|
||||
use crate::status::{VmRunningState, VmState};
|
||||
use anyhow::{anyhow, bail, ensure, Result};
|
||||
use anyhow::{anyhow, bail, ensure, Context, Result};
|
||||
use chrono::Utc;
|
||||
use futures::StreamExt;
|
||||
use ipnetwork::IpNetwork;
|
||||
use lnvps_db::{async_trait, DiskType, Vm, VmOsImage};
|
||||
use lnvps_db::{async_trait, DiskType, IpRangeAllocationMode, Vm, VmOsImage};
|
||||
use log::{info, warn};
|
||||
use rand::random;
|
||||
use reqwest::header::{HeaderMap, AUTHORIZATION};
|
||||
@ -14,9 +18,11 @@ use reqwest::{ClientBuilder, Method, Url};
|
||||
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
||||
use std::collections::HashMap;
|
||||
use std::fmt::{Debug, Display, Formatter};
|
||||
use std::io::Write;
|
||||
use std::net::IpAddr;
|
||||
use std::str::FromStr;
|
||||
use std::time::Duration;
|
||||
use tokio::sync::mpsc::channel;
|
||||
use tokio::time::sleep;
|
||||
|
||||
pub struct ProxmoxClient {
|
||||
@ -36,19 +42,8 @@ impl ProxmoxClient {
|
||||
config: QemuConfig,
|
||||
ssh: Option<SshConfig>,
|
||||
) -> Self {
|
||||
let mut headers = HeaderMap::new();
|
||||
headers.insert(
|
||||
AUTHORIZATION,
|
||||
format!("PVEAPIToken={}", token).parse().unwrap(),
|
||||
);
|
||||
let client = ClientBuilder::new()
|
||||
.danger_accept_invalid_certs(true)
|
||||
.default_headers(headers)
|
||||
.build()
|
||||
.expect("Failed to build client");
|
||||
|
||||
Self {
|
||||
api: JsonApi { base, client },
|
||||
api: JsonApi::token(base.as_str(), &format!("PVEAPIToken={}", token), true).unwrap(),
|
||||
config,
|
||||
ssh,
|
||||
node: node.to_string(),
|
||||
@ -94,6 +89,14 @@ impl ProxmoxClient {
|
||||
Ok(rsp.data)
|
||||
}
|
||||
|
||||
pub async fn list_disks(&self, node: &str) -> Result<Vec<NodeDisk>> {
|
||||
let rsp: ResponseBase<Vec<NodeDisk>> = self
|
||||
.api
|
||||
.get(&format!("/api2/json/nodes/{node}/disks/list"))
|
||||
.await?;
|
||||
Ok(rsp.data)
|
||||
}
|
||||
|
||||
/// List files in a storage pool
|
||||
pub async fn list_storage_files(
|
||||
&self,
|
||||
@ -248,7 +251,7 @@ impl ProxmoxClient {
|
||||
if let Some(ssh_config) = &self.ssh {
|
||||
let mut ses = SshClient::new()?;
|
||||
ses.connect(
|
||||
(self.api.base.host().unwrap().to_string(), 22),
|
||||
(self.api.base().host().unwrap().to_string(), 22),
|
||||
&ssh_config.user,
|
||||
&ssh_config.key,
|
||||
)
|
||||
@ -365,6 +368,30 @@ impl ProxmoxClient {
|
||||
node: node.to_string(),
|
||||
})
|
||||
}
|
||||
|
||||
/// Delete disks from VM
|
||||
pub async fn unlink_disk(
|
||||
&self,
|
||||
node: &str,
|
||||
vm: ProxmoxVmId,
|
||||
disks: Vec<String>,
|
||||
force: bool,
|
||||
) -> Result<()> {
|
||||
self.api
|
||||
.req_status(
|
||||
Method::PUT,
|
||||
&format!(
|
||||
"/api2/json/nodes/{}/qemu/{}/unlink?idlist={}&force={}",
|
||||
node,
|
||||
vm,
|
||||
disks.join(","),
|
||||
if force { "1" } else { "0" }
|
||||
),
|
||||
(),
|
||||
)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl ProxmoxClient {
|
||||
@ -376,15 +403,28 @@ impl ProxmoxClient {
|
||||
if let Ok(net) = ip.ip.parse::<IpAddr>() {
|
||||
Some(match net {
|
||||
IpAddr::V4(addr) => {
|
||||
let range = value.ranges.iter().find(|r| r.id == ip.ip_range_id)?;
|
||||
let range: IpNetwork = range.gateway.parse().ok()?;
|
||||
let ip_range = value.ranges.iter().find(|r| r.id == ip.ip_range_id)?;
|
||||
let range: IpNetwork = ip_range.cidr.parse().ok()?;
|
||||
let range_gw: IpNetwork = ip_range.gateway.parse().ok()?;
|
||||
// take the largest (smallest prefix number) of the network prefixes
|
||||
let max_net = range.prefix().min(range_gw.prefix());
|
||||
format!(
|
||||
"ip={},gw={}",
|
||||
IpNetwork::new(addr.into(), range.prefix()).ok()?,
|
||||
range.ip()
|
||||
IpNetwork::new(addr.into(), max_net).ok()?,
|
||||
range_gw.ip()
|
||||
)
|
||||
}
|
||||
IpAddr::V6(addr) => format!("ip6={}", addr),
|
||||
IpAddr::V6(addr) => {
|
||||
let ip_range = value.ranges.iter().find(|r| r.id == ip.ip_range_id)?;
|
||||
if matches!(ip_range.allocation_mode, IpRangeAllocationMode::SlaacEui64)
|
||||
{
|
||||
// just ignore what's in the db and use whatever the host wants
|
||||
// what's in the db is purely informational
|
||||
"ip6=auto".to_string()
|
||||
} else {
|
||||
format!("ip6={}", addr)
|
||||
}
|
||||
}
|
||||
})
|
||||
} else {
|
||||
None
|
||||
@ -392,14 +432,11 @@ impl ProxmoxClient {
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
// TODO: make this configurable
|
||||
ip_config.push("ip6=auto".to_string());
|
||||
|
||||
let mut net = vec![
|
||||
format!("virtio={}", value.vm.mac_address),
|
||||
format!("bridge={}", self.config.bridge),
|
||||
];
|
||||
if let Some(t) = self.config.vlan {
|
||||
if let Some(t) = value.host.vlan_id {
|
||||
net.push(format!("tag={}", t));
|
||||
}
|
||||
|
||||
@ -415,7 +452,7 @@ impl ProxmoxClient {
|
||||
bios: Some(VmBios::OVMF),
|
||||
boot: Some("order=scsi0".to_string()),
|
||||
cores: Some(vm_resources.cpu as i32),
|
||||
memory: Some((vm_resources.memory / 1024 / 1024).to_string()),
|
||||
memory: Some((vm_resources.memory / crate::MB).to_string()),
|
||||
scsi_hw: Some("virtio-scsi-pci".to_string()),
|
||||
serial_0: Some("socket".to_string()),
|
||||
scsi_1: Some(format!("{}:cloudinit", &value.disk.name)),
|
||||
@ -424,9 +461,74 @@ impl ProxmoxClient {
|
||||
..Default::default()
|
||||
})
|
||||
}
|
||||
|
||||
/// Import main disk image from the template
|
||||
async fn import_template_disk(&self, req: &FullVmInfo) -> Result<()> {
|
||||
let vm_id = req.vm.id.into();
|
||||
|
||||
// import primary disk from image (scsi0)
|
||||
self.import_disk_image(ImportDiskImageRequest {
|
||||
vm_id,
|
||||
node: self.node.clone(),
|
||||
storage: req.disk.name.clone(),
|
||||
disk: "scsi0".to_string(),
|
||||
image: req.image.filename()?,
|
||||
is_ssd: matches!(req.disk.kind, DiskType::SSD),
|
||||
})
|
||||
.await?;
|
||||
|
||||
// resize disk to match template
|
||||
let j_resize = self
|
||||
.resize_disk(ResizeDiskRequest {
|
||||
node: self.node.clone(),
|
||||
vm_id,
|
||||
disk: "scsi0".to_string(),
|
||||
size: req.resources()?.disk_size.to_string(),
|
||||
})
|
||||
.await?;
|
||||
// TODO: rollback
|
||||
self.wait_for_task(&j_resize).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl VmHostClient for ProxmoxClient {
|
||||
async fn get_info(&self) -> Result<VmHostInfo> {
|
||||
let nodes = self.list_nodes().await?;
|
||||
if let Some(n) = nodes.iter().find(|n| n.name == self.node) {
|
||||
let storages = self.list_storage(&n.name).await?;
|
||||
let info = VmHostInfo {
|
||||
cpu: n.max_cpu
|
||||
.context("Missing cpu count, please make sure you have Sys.Audit permission")?,
|
||||
memory: n.max_mem
|
||||
.context("Missing memory size, please make sure you have Sys.Audit permission")?,
|
||||
disks: storages
|
||||
.into_iter()
|
||||
.filter_map(|s| {
|
||||
let size = s.total
|
||||
.context("Missing disk size, please make sure you have Datastore.Audit permission")
|
||||
.ok()?;
|
||||
let used = s.used
|
||||
.context("Missing used disk, please make sure you have Datastore.Audit permission")
|
||||
.ok()?;
|
||||
|
||||
Some(VmHostDiskInfo {
|
||||
name: s.storage,
|
||||
size,
|
||||
used,
|
||||
})
|
||||
})
|
||||
.collect(),
|
||||
};
|
||||
|
||||
Ok(info)
|
||||
} else {
|
||||
bail!("Could not find node {}", self.node);
|
||||
}
|
||||
}
|
||||
|
||||
async fn download_os_image(&self, image: &VmOsImage) -> Result<()> {
|
||||
let iso_storage = self.get_iso_storage(&self.node).await?;
|
||||
let files = self.list_storage_files(&self.node, &iso_storage).await?;
|
||||
@ -496,28 +598,35 @@ impl VmHostClient for ProxmoxClient {
|
||||
.await?;
|
||||
self.wait_for_task(&t_create).await?;
|
||||
|
||||
// import primary disk from image (scsi0)
|
||||
self.import_disk_image(ImportDiskImageRequest {
|
||||
vm_id,
|
||||
node: self.node.clone(),
|
||||
storage: req.disk.name.clone(),
|
||||
disk: "scsi0".to_string(),
|
||||
image: req.image.filename()?,
|
||||
is_ssd: matches!(req.disk.kind, DiskType::SSD),
|
||||
})
|
||||
.await?;
|
||||
// import template image
|
||||
self.import_template_disk(&req).await?;
|
||||
|
||||
// resize disk to match template
|
||||
let j_resize = self
|
||||
.resize_disk(ResizeDiskRequest {
|
||||
node: self.node.clone(),
|
||||
vm_id,
|
||||
disk: "scsi0".to_string(),
|
||||
size: req.resources()?.disk_size.to_string(),
|
||||
})
|
||||
// try start, otherwise ignore error (maybe its already running)
|
||||
if let Ok(j_start) = self.start_vm(&self.node, vm_id).await {
|
||||
if let Err(e) = self.wait_for_task(&j_start).await {
|
||||
warn!("Failed to start vm: {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn reinstall_vm(&self, req: &FullVmInfo) -> Result<()> {
|
||||
let vm_id = req.vm.id.into();
|
||||
|
||||
// try stop, otherwise ignore error (maybe its already running)
|
||||
if let Ok(j_stop) = self.stop_vm(&self.node, vm_id).await {
|
||||
if let Err(e) = self.wait_for_task(&j_stop).await {
|
||||
warn!("Failed to stop vm: {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
// unlink the existing main disk
|
||||
self.unlink_disk(&self.node, vm_id, vec!["scsi0".to_string()], true)
|
||||
.await?;
|
||||
// TODO: rollback
|
||||
self.wait_for_task(&j_resize).await?;
|
||||
|
||||
// import disk from template again
|
||||
self.import_template_disk(&req).await?;
|
||||
|
||||
// try start, otherwise ignore error (maybe its already running)
|
||||
if let Ok(j_start) = self.start_vm(&self.node, vm_id).await {
|
||||
@ -585,6 +694,31 @@ impl VmHostClient for ProxmoxClient {
|
||||
.await?;
|
||||
Ok(r.into_iter().map(TimeSeriesData::from).collect())
|
||||
}
|
||||
|
||||
async fn connect_terminal(&self, vm: &Vm) -> Result<TerminalStream> {
|
||||
let vm_id: ProxmoxVmId = vm.id.into();
|
||||
|
||||
let (mut client_tx, client_rx) = channel::<Vec<u8>>(1024);
|
||||
let (server_tx, mut server_rx) = channel::<Vec<u8>>(1024);
|
||||
tokio::spawn(async move {
|
||||
// fire calls to read every 100ms
|
||||
loop {
|
||||
tokio::select! {
|
||||
Some(buf) = server_rx.recv() => {
|
||||
// echo
|
||||
client_tx.send(buf).await?;
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
info!("SSH connection terminated!");
|
||||
Ok::<(), anyhow::Error>(())
|
||||
});
|
||||
Ok(TerminalStream {
|
||||
rx: client_rx,
|
||||
tx: server_tx,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Wrap a database vm id
|
||||
@ -771,6 +905,7 @@ pub enum StorageContent {
|
||||
ISO,
|
||||
VZTmpL,
|
||||
Import,
|
||||
Snippets,
|
||||
}
|
||||
|
||||
impl FromStr for StorageContent {
|
||||
@ -784,6 +919,7 @@ impl FromStr for StorageContent {
|
||||
"iso" => Ok(StorageContent::ISO),
|
||||
"vztmpl" => Ok(StorageContent::VZTmpL),
|
||||
"import" => Ok(StorageContent::Import),
|
||||
"snippets" => Ok(StorageContent::Snippets),
|
||||
_ => Err(()),
|
||||
}
|
||||
}
|
||||
@ -794,19 +930,28 @@ pub struct NodeStorage {
|
||||
pub content: String,
|
||||
pub storage: String,
|
||||
#[serde(rename = "type")]
|
||||
pub kind: Option<StorageType>,
|
||||
#[serde(rename = "thinpool")]
|
||||
pub thin_pool: Option<String>,
|
||||
pub kind: StorageType,
|
||||
/// Available storage space in bytes
|
||||
#[serde(rename = "avial")]
|
||||
pub available: Option<u64>,
|
||||
/// Total storage space in bytes
|
||||
pub total: Option<u64>,
|
||||
/// Used storage space in bytes
|
||||
pub used: Option<u64>,
|
||||
}
|
||||
|
||||
impl NodeStorage {
|
||||
pub fn contents(&self) -> Vec<StorageContent> {
|
||||
self.content
|
||||
.split(",")
|
||||
.map_while(|s| s.parse().ok())
|
||||
.map_while(|s| StorageContent::from_str(&s).ok())
|
||||
.collect()
|
||||
}
|
||||
}
|
||||
|
||||
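With the Import and Snippets variants added, contents() can now report pools that accept disk-image imports; a small sketch of the parsing (the content string here is made up, only variants shown in this diff are used):

    use std::str::FromStr;

    let kinds: Vec<StorageContent> = "iso,vztmpl,import,snippets"
        .split(",")
        .map_while(|s| StorageContent::from_str(s).ok())
        .collect();
    assert_eq!(kinds.len(), 4);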
#[derive(Debug, Deserialize)]
|
||||
pub struct NodeDisk {}
|
||||
|
||||
#[derive(Debug, Serialize)]
|
||||
pub struct DownloadUrlRequest {
|
||||
pub content: StorageContent,
|
||||
@ -977,3 +1122,187 @@ impl From<RrdDataPoint> for TimeSeriesData {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::{GB, MB, TB};
|
||||
use lnvps_db::{
|
||||
DiskInterface, IpRange, IpRangeAllocationMode, OsDistribution, UserSshKey, VmHost,
|
||||
VmHostDisk, VmIpAssignment, VmTemplate,
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_config() -> Result<()> {
|
||||
let template = VmTemplate {
|
||||
id: 1,
|
||||
name: "example".to_string(),
|
||||
enabled: true,
|
||||
created: Default::default(),
|
||||
expires: None,
|
||||
cpu: 2,
|
||||
memory: 2 * GB,
|
||||
disk_size: 100 * GB,
|
||||
disk_type: DiskType::SSD,
|
||||
disk_interface: DiskInterface::PCIe,
|
||||
cost_plan_id: 1,
|
||||
region_id: 1,
|
||||
};
|
||||
let cfg = FullVmInfo {
|
||||
vm: Vm {
|
||||
id: 1,
|
||||
host_id: 1,
|
||||
user_id: 1,
|
||||
image_id: 1,
|
||||
template_id: Some(template.id),
|
||||
custom_template_id: None,
|
||||
ssh_key_id: 1,
|
||||
created: Default::default(),
|
||||
expires: Default::default(),
|
||||
disk_id: 1,
|
||||
mac_address: "ff:ff:ff:ff:ff:fe".to_string(),
|
||||
deleted: false,
|
||||
ref_code: None,
|
||||
},
|
||||
host: VmHost {
|
||||
id: 1,
|
||||
kind: Default::default(),
|
||||
region_id: 1,
|
||||
name: "mock".to_string(),
|
||||
ip: "https://localhost:8006".to_string(),
|
||||
cpu: 20,
|
||||
memory: 128 * GB,
|
||||
enabled: true,
|
||||
api_token: "mock".to_string(),
|
||||
load_cpu: 1.0,
|
||||
load_memory: 1.0,
|
||||
load_disk: 1.0,
|
||||
vlan_id: Some(100),
|
||||
},
|
||||
disk: VmHostDisk {
|
||||
id: 1,
|
||||
host_id: 1,
|
||||
name: "ssd".to_string(),
|
||||
size: TB * 20,
|
||||
kind: DiskType::SSD,
|
||||
interface: DiskInterface::PCIe,
|
||||
enabled: true,
|
||||
},
|
||||
template: Some(template.clone()),
|
||||
custom_template: None,
|
||||
image: VmOsImage {
|
||||
id: 1,
|
||||
distribution: OsDistribution::Ubuntu,
|
||||
flavour: "Server".to_string(),
|
||||
version: "24.04.03".to_string(),
|
||||
enabled: true,
|
||||
release_date: Utc::now(),
|
||||
url: "http://localhost.com/ubuntu_server_24.04.img".to_string(),
|
||||
},
|
||||
ips: vec![
|
||||
VmIpAssignment {
|
||||
id: 1,
|
||||
vm_id: 1,
|
||||
ip_range_id: 1,
|
||||
ip: "192.168.1.2".to_string(),
|
||||
deleted: false,
|
||||
arp_ref: None,
|
||||
dns_forward: None,
|
||||
dns_forward_ref: None,
|
||||
dns_reverse: None,
|
||||
dns_reverse_ref: None,
|
||||
},
|
||||
VmIpAssignment {
|
||||
id: 2,
|
||||
vm_id: 1,
|
||||
ip_range_id: 2,
|
||||
ip: "192.168.2.2".to_string(),
|
||||
deleted: false,
|
||||
arp_ref: None,
|
||||
dns_forward: None,
|
||||
dns_forward_ref: None,
|
||||
dns_reverse: None,
|
||||
dns_reverse_ref: None,
|
||||
},
|
||||
VmIpAssignment {
|
||||
id: 3,
|
||||
vm_id: 1,
|
||||
ip_range_id: 3,
|
||||
ip: "fd00::ff:ff:ff:ff:ff".to_string(),
|
||||
deleted: false,
|
||||
arp_ref: None,
|
||||
dns_forward: None,
|
||||
dns_forward_ref: None,
|
||||
dns_reverse: None,
|
||||
dns_reverse_ref: None,
|
||||
},
|
||||
],
|
||||
ranges: vec![
|
||||
IpRange {
|
||||
id: 1,
|
||||
cidr: "192.168.1.0/24".to_string(),
|
||||
gateway: "192.168.1.1/16".to_string(),
|
||||
enabled: true,
|
||||
region_id: 1,
|
||||
..Default::default()
|
||||
},
|
||||
IpRange {
|
||||
id: 2,
|
||||
cidr: "192.168.2.0/24".to_string(),
|
||||
gateway: "10.10.10.10".to_string(),
|
||||
enabled: true,
|
||||
region_id: 2,
|
||||
..Default::default()
|
||||
},
|
||||
IpRange {
|
||||
id: 3,
|
||||
cidr: "fd00::/64".to_string(),
|
||||
gateway: "fd00::1".to_string(),
|
||||
enabled: true,
|
||||
region_id: 1,
|
||||
allocation_mode: IpRangeAllocationMode::SlaacEui64,
|
||||
..Default::default()
|
||||
},
|
||||
],
|
||||
ssh_key: UserSshKey {
|
||||
id: 1,
|
||||
name: "test".to_string(),
|
||||
user_id: 1,
|
||||
created: Default::default(),
|
||||
key_data: "ssh-ed25519 AAA=".to_string(),
|
||||
},
|
||||
};
|
||||
|
||||
let q_cfg = QemuConfig {
|
||||
machine: "q35".to_string(),
|
||||
os_type: "l26".to_string(),
|
||||
bridge: "vmbr1".to_string(),
|
||||
cpu: "kvm64".to_string(),
|
||||
kvm: true,
|
||||
};
|
||||
|
||||
let p = ProxmoxClient::new(
|
||||
"http://localhost:8006".parse()?,
|
||||
"",
|
||||
"",
|
||||
None,
|
||||
q_cfg.clone(),
|
||||
None,
|
||||
);
|
||||
|
||||
let vm = p.make_config(&cfg)?;
|
||||
assert_eq!(vm.cpu, Some(q_cfg.cpu));
|
||||
assert_eq!(vm.cores, Some(template.cpu as i32));
|
||||
assert_eq!(vm.memory, Some((template.memory / MB).to_string()));
|
||||
assert_eq!(vm.on_boot, Some(true));
|
||||
assert!(vm.net.unwrap().contains("tag=100"));
|
||||
assert_eq!(
|
||||
vm.ip_config,
|
||||
Some(
|
||||
"ip=192.168.1.2/16,gw=192.168.1.1,ip=192.168.2.2/24,gw=10.10.10.10,ip6=auto"
|
||||
.to_string()
|
||||
)
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
@ -1,64 +0,0 @@
|
||||
use crate::lightning::{InvoiceUpdate, LightningNode};
|
||||
use crate::worker::WorkJob;
|
||||
use anyhow::Result;
|
||||
use lnvps_db::LNVpsDb;
|
||||
use log::{error, info, warn};
|
||||
use nostr::util::hex;
|
||||
use rocket::futures::StreamExt;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::mpsc::UnboundedSender;
|
||||
|
||||
pub struct InvoiceHandler {
|
||||
node: Arc<dyn LightningNode>,
|
||||
db: Arc<dyn LNVpsDb>,
|
||||
tx: UnboundedSender<WorkJob>,
|
||||
}
|
||||
|
||||
impl InvoiceHandler {
|
||||
pub fn new(
|
||||
node: Arc<dyn LightningNode>,
|
||||
db: Arc<dyn LNVpsDb>,
|
||||
tx: UnboundedSender<WorkJob>,
|
||||
) -> Self {
|
||||
Self { node, tx, db }
|
||||
}
|
||||
|
||||
async fn mark_paid(&self, settle_index: u64, id: &Vec<u8>) -> Result<()> {
|
||||
let mut p = self.db.get_vm_payment(id).await?;
|
||||
p.settle_index = Some(settle_index);
|
||||
self.db.vm_payment_paid(&p).await?;
|
||||
|
||||
info!("VM payment {} for {}, paid", hex::encode(p.id), p.vm_id);
|
||||
self.tx.send(WorkJob::CheckVm { vm_id: p.vm_id })?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn listen(&mut self) -> Result<()> {
|
||||
let from_ph = self.db.last_paid_invoice().await?.map(|i| i.id.clone());
|
||||
info!(
|
||||
"Listening for invoices from {}",
|
||||
from_ph
|
||||
.as_ref()
|
||||
.map(hex::encode)
|
||||
.unwrap_or("NOW".to_string())
|
||||
);
|
||||
|
||||
let mut handler = self.node.subscribe_invoices(from_ph).await?;
|
||||
while let Some(msg) = handler.next().await {
|
||||
match msg {
|
||||
InvoiceUpdate::Settled {
|
||||
payment_hash,
|
||||
settle_index,
|
||||
} => {
|
||||
let r_hash = hex::decode(payment_hash)?;
|
||||
if let Err(e) = self.mark_paid(settle_index, &r_hash).await {
|
||||
error!("{}", e);
|
||||
}
|
||||
}
|
||||
v => warn!("Unknown invoice update: {:?}", v),
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
148 src/json_api.rs
@ -1,46 +1,112 @@
|
||||
use anyhow::bail;
|
||||
use anyhow::{bail, Result};
|
||||
use log::debug;
|
||||
use reqwest::header::{HeaderMap, AUTHORIZATION};
|
||||
use reqwest::{Client, Method, Url};
|
||||
use reqwest::header::{HeaderMap, ACCEPT, AUTHORIZATION, CONTENT_TYPE, USER_AGENT};
|
||||
use reqwest::{Client, Method, RequestBuilder, Url};
|
||||
use serde::de::DeserializeOwned;
|
||||
use serde::Serialize;
|
||||
use std::sync::Arc;
|
||||
|
||||
pub trait TokenGen: Send + Sync {
    fn generate_token(
        &self,
        method: Method,
        url: &Url,
        body: Option<&str>,
        req: RequestBuilder,
    ) -> Result<RequestBuilder>;
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct JsonApi {
|
||||
pub client: Client,
|
||||
pub base: Url,
|
||||
client: Client,
|
||||
base: Url,
|
||||
/// Custom token generator per request
|
||||
token_gen: Option<Arc<dyn TokenGen>>,
|
||||
}
|
||||
|
||||
impl JsonApi {
|
||||
pub fn token(base: &str, token: &str) -> anyhow::Result<Self> {
|
||||
pub fn new(base: &str) -> Result<Self> {
|
||||
let mut headers = HeaderMap::new();
|
||||
headers.insert(AUTHORIZATION, token.parse()?);
|
||||
headers.insert(USER_AGENT, "lnvps/1.0".parse()?);
|
||||
headers.insert(ACCEPT, "application/json; charset=utf-8".parse()?);
|
||||
|
||||
let client = Client::builder().default_headers(headers).build()?;
|
||||
|
||||
Ok(Self {
|
||||
client,
|
||||
base: base.parse()?,
|
||||
token_gen: None,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn get<T: DeserializeOwned>(&self, path: &str) -> anyhow::Result<T> {
|
||||
pub fn token(base: &str, token: &str, allow_invalid_certs: bool) -> Result<Self> {
|
||||
let mut headers = HeaderMap::new();
|
||||
headers.insert(USER_AGENT, "lnvps/1.0".parse()?);
|
||||
headers.insert(AUTHORIZATION, token.parse()?);
|
||||
headers.insert(ACCEPT, "application/json; charset=utf-8".parse()?);
|
||||
|
||||
let client = Client::builder()
|
||||
.danger_accept_invalid_certs(allow_invalid_certs)
|
||||
.default_headers(headers)
|
||||
.build()?;
|
||||
Ok(Self {
|
||||
client,
|
||||
base: base.parse()?,
|
||||
token_gen: None,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn token_gen(
|
||||
base: &str,
|
||||
allow_invalid_certs: bool,
|
||||
tg: impl TokenGen + 'static,
|
||||
) -> Result<Self> {
|
||||
let mut headers = HeaderMap::new();
|
||||
headers.insert(USER_AGENT, "lnvps/1.0".parse()?);
|
||||
headers.insert(ACCEPT, "application/json; charset=utf-8".parse()?);
|
||||
|
||||
let client = Client::builder()
|
||||
.danger_accept_invalid_certs(allow_invalid_certs)
|
||||
.default_headers(headers)
|
||||
.build()?;
|
||||
Ok(Self {
|
||||
client,
|
||||
base: base.parse()?,
|
||||
token_gen: Some(Arc::new(tg)),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn base(&self) -> &Url {
|
||||
&self.base
|
||||
}
|
||||
|
||||
pub async fn get<T: DeserializeOwned>(&self, path: &str) -> Result<T> {
|
||||
let text = self.get_raw(path).await?;
|
||||
Ok(serde_json::from_str::<T>(&text)?)
|
||||
}
|
||||
|
||||
/// Get raw string response
|
||||
pub async fn get_raw(&self, path: &str) -> Result<String> {
|
||||
debug!(">> GET {}", path);
|
||||
let rsp = self.client.get(self.base.join(path)?).send().await?;
|
||||
let url = self.base.join(path)?;
|
||||
let mut req = self.client.request(Method::GET, url.clone());
|
||||
if let Some(gen) = &self.token_gen {
|
||||
req = gen.generate_token(Method::GET, &url, None, req)?;
|
||||
}
|
||||
let req = req.build()?;
|
||||
debug!(">> HEADERS {:?}", req.headers());
|
||||
let rsp = self.client.execute(req).await?;
|
||||
let status = rsp.status();
|
||||
let text = rsp.text().await?;
|
||||
#[cfg(debug_assertions)]
|
||||
debug!("<< {}", text);
|
||||
if status.is_success() {
|
||||
Ok(serde_json::from_str(&text)?)
|
||||
Ok(text)
|
||||
} else {
|
||||
bail!("{}", status);
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn post<T: DeserializeOwned, R: Serialize>(
|
||||
&self,
|
||||
path: &str,
|
||||
body: R,
|
||||
) -> anyhow::Result<T> {
|
||||
pub async fn post<T: DeserializeOwned, R: Serialize>(&self, path: &str, body: R) -> Result<T> {
|
||||
self.req(Method::POST, path, body).await
|
||||
}
|
||||
|
||||
@ -49,17 +115,20 @@ impl JsonApi {
|
||||
method: Method,
|
||||
path: &str,
|
||||
body: R,
|
||||
) -> anyhow::Result<T> {
|
||||
) -> Result<T> {
|
||||
let body = serde_json::to_string(&body)?;
|
||||
debug!(">> {} {}: {}", method.clone(), path, &body);
|
||||
let rsp = self
|
||||
let url = self.base.join(path)?;
|
||||
let mut req = self
|
||||
.client
|
||||
.request(method.clone(), self.base.join(path)?)
|
||||
.header("Content-Type", "application/json")
|
||||
.header("Accept", "application/json")
|
||||
.body(body)
|
||||
.send()
|
||||
.await?;
|
||||
.request(method.clone(), url.clone())
|
||||
.header(CONTENT_TYPE, "application/json; charset=utf-8");
|
||||
if let Some(gen) = self.token_gen.as_ref() {
|
||||
req = gen.generate_token(method.clone(), &url, Some(&body), req)?;
|
||||
}
|
||||
let req = req.body(body).build()?;
|
||||
debug!(">> HEADERS {:?}", req.headers());
|
||||
let rsp = self.client.execute(req).await?;
|
||||
let status = rsp.status();
|
||||
let text = rsp.text().await?;
|
||||
#[cfg(debug_assertions)]
|
||||
@ -67,7 +136,36 @@ impl JsonApi {
|
||||
if status.is_success() {
|
||||
Ok(serde_json::from_str(&text)?)
|
||||
} else {
|
||||
bail!("{} {}: {}: {}", method, path, status, &text);
|
||||
bail!("{} {}: {}: {}", method, url, status, &text);
|
||||
}
|
||||
}
|
||||
|
||||
/// Make a request and only return the status code
|
||||
pub async fn req_status<R: Serialize>(
|
||||
&self,
|
||||
method: Method,
|
||||
path: &str,
|
||||
body: R,
|
||||
) -> Result<u16> {
|
||||
let body = serde_json::to_string(&body)?;
|
||||
debug!(">> {} {}: {}", method.clone(), path, &body);
|
||||
let url = self.base.join(path)?;
|
||||
let mut req = self
|
||||
.client
|
||||
.request(method.clone(), url.clone())
|
||||
.header(CONTENT_TYPE, "application/json; charset=utf-8");
|
||||
if let Some(gen) = &self.token_gen {
|
||||
req = gen.generate_token(method.clone(), &url, Some(&body), req)?;
|
||||
}
|
||||
let rsp = req.body(body).send().await?;
|
||||
let status = rsp.status();
|
||||
let text = rsp.text().await?;
|
||||
#[cfg(debug_assertions)]
|
||||
debug!("<< {}", text);
|
||||
if status.is_success() {
|
||||
Ok(status.as_u16())
|
||||
} else {
|
||||
bail!("{} {}: {}: {}", method, url, status, &text);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
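The TokenGen hook introduced above lets a client sign each request at send time instead of relying on a static AUTHORIZATION header. A minimal sketch of an implementor, assuming an HMAC-SHA256 scheme and an invented header name (neither is part of this changeset):

    use anyhow::Result;
    use hmac::{Hmac, Mac};
    use reqwest::{Method, RequestBuilder, Url};
    use sha2::Sha256;

    /// Hypothetical signer: HMAC over "METHOD path body", attached as a custom header.
    struct HmacTokenGen {
        secret: String,
    }

    impl TokenGen for HmacTokenGen {
        fn generate_token(
            &self,
            method: Method,
            url: &Url,
            body: Option<&str>,
            req: RequestBuilder,
        ) -> Result<RequestBuilder> {
            let mut mac = Hmac::<Sha256>::new_from_slice(self.secret.as_bytes())?;
            mac.update(method.as_str().as_bytes());
            mac.update(b" ");
            mac.update(url.path().as_bytes());
            if let Some(b) = body {
                mac.update(b" ");
                mac.update(b.as_bytes());
            }
            // "x-api-signature" is a placeholder header name for this example
            Ok(req.header("x-api-signature", hex::encode(mac.finalize().into_bytes())))
        }
    }

    // Wiring it in: JsonApi::token_gen("https://api.example.com", false, HmacTokenGen { secret: "...".into() })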
14 src/lib.rs
@ -1,12 +1,14 @@
|
||||
pub mod api;
|
||||
pub mod cors;
|
||||
pub mod data_migration;
|
||||
pub mod dns;
|
||||
pub mod exchange;
|
||||
pub mod fiat;
|
||||
pub mod host;
|
||||
pub mod invoice;
|
||||
pub mod json_api;
|
||||
pub mod lightning;
|
||||
pub mod nip98;
|
||||
pub mod payments;
|
||||
pub mod provisioner;
|
||||
pub mod router;
|
||||
pub mod settings;
|
||||
@ -17,3 +19,13 @@ pub mod worker;
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod mocks;
|
||||
|
||||
#[cfg(feature = "nostr-dvm")]
|
||||
pub mod dvm;
|
||||
|
||||
/// SATS per BTC
pub const BTC_SATS: f64 = 100_000_000.0;
pub const KB: u64 = 1024;
pub const MB: u64 = KB * 1024;
pub const GB: u64 = MB * 1024;
pub const TB: u64 = GB * 1024;
|
||||
|
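These shared size constants replace the per-module copies (e.g. MockDb::GB below) and back the new memory conversion in the Proxmox client; a quick check, using only the constants above:

    // Proxmox expects the memory value in MiB, so bytes are divided by MB.
    let template_memory: u64 = 2 * GB;          // 2 GiB in bytes
    assert_eq!(template_memory / MB, 2048);     // the value written into QemuConfig.memory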
@ -1,9 +1,11 @@
|
||||
use crate::api::WEBHOOK_BRIDGE;
|
||||
use crate::api::{WebhookMessage, WEBHOOK_BRIDGE};
|
||||
use crate::json_api::JsonApi;
|
||||
use crate::lightning::{AddInvoiceRequest, AddInvoiceResult, InvoiceUpdate, LightningNode};
|
||||
use anyhow::bail;
|
||||
use anyhow::{anyhow, bail};
|
||||
use futures::{Stream, StreamExt};
|
||||
use hmac::{Hmac, Mac};
|
||||
use lnvps_db::async_trait;
|
||||
use log::{info, warn};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::pin::Pin;
|
||||
use tokio_stream::wrappers::BroadcastStream;
|
||||
@ -17,7 +19,7 @@ impl BitvoraNode {
|
||||
pub fn new(api_token: &str, webhook_secret: &str) -> Self {
|
||||
let auth = format!("Bearer {}", api_token);
|
||||
Self {
|
||||
api: JsonApi::token("https://api.bitvora.com/", &auth).unwrap(),
|
||||
api: JsonApi::token("https://api.bitvora.com/", &auth, false).unwrap(),
|
||||
webhook_secret: webhook_secret.to_string(),
|
||||
}
|
||||
}
|
||||
@ -46,6 +48,7 @@ impl LightningNode for BitvoraNode {
|
||||
Ok(AddInvoiceResult {
|
||||
pr: rsp.data.payment_request,
|
||||
payment_hash: rsp.data.r_hash,
|
||||
external_id: Some(rsp.data.id),
|
||||
})
|
||||
}
|
||||
|
||||
@ -54,7 +57,45 @@ impl LightningNode for BitvoraNode {
|
||||
_from_payment_hash: Option<Vec<u8>>,
|
||||
) -> anyhow::Result<Pin<Box<dyn Stream<Item = InvoiceUpdate> + Send>>> {
|
||||
let rx = BroadcastStream::new(WEBHOOK_BRIDGE.listen());
|
||||
let mapped = rx.then(|r| async move { InvoiceUpdate::Unknown });
|
||||
let secret = self.webhook_secret.clone();
|
||||
let mapped = rx.then(move |r| {
|
||||
let secret = secret.clone();
|
||||
async move {
|
||||
match r {
|
||||
Ok(r) => {
|
||||
if r.endpoint != "/api/v1/webhook/bitvora" {
|
||||
return InvoiceUpdate::Unknown;
|
||||
}
|
||||
let r_body = r.body.as_slice();
|
||||
info!("Received webhook {}", String::from_utf8_lossy(r_body));
|
||||
let body: BitvoraWebhook = match serde_json::from_slice(r_body) {
|
||||
Ok(b) => b,
|
||||
Err(e) => return InvoiceUpdate::Error(e.to_string()),
|
||||
};
|
||||
|
||||
if let Err(e) = verify_webhook(&secret, &r) {
|
||||
return InvoiceUpdate::Error(e.to_string());
|
||||
}
|
||||
|
||||
match body.event {
|
||||
BitvoraWebhookEvent::DepositLightningComplete => {
|
||||
InvoiceUpdate::Settled {
|
||||
payment_hash: None,
|
||||
external_id: Some(body.data.lightning_invoice_id),
|
||||
}
|
||||
}
|
||||
BitvoraWebhookEvent::DepositLightningFailed => {
|
||||
InvoiceUpdate::Error("Payment failed".to_string())
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("Error handling webhook: {}", e);
|
||||
InvoiceUpdate::Error(e.to_string())
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
Ok(Box::pin(mapped))
|
||||
}
|
||||
}
|
||||
@ -80,3 +121,43 @@ struct CreateInvoiceResponse {
|
||||
pub r_hash: String,
|
||||
pub payment_request: String,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Debug, Clone)]
|
||||
struct BitvoraWebhook {
|
||||
pub event: BitvoraWebhookEvent,
|
||||
pub data: BitvoraPayment,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Debug, Clone)]
|
||||
enum BitvoraWebhookEvent {
|
||||
#[serde(rename = "deposit.lightning.completed")]
|
||||
DepositLightningComplete,
|
||||
#[serde(rename = "deposit.lightning.failed")]
|
||||
DepositLightningFailed,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Debug, Clone)]
|
||||
struct BitvoraPayment {
|
||||
pub id: String,
|
||||
pub lightning_invoice_id: String,
|
||||
}
|
||||
|
||||
type HmacSha256 = Hmac<sha2::Sha256>;
|
||||
fn verify_webhook(secret: &str, msg: &WebhookMessage) -> anyhow::Result<()> {
|
||||
let sig = msg
|
||||
.headers
|
||||
.get("bitvora-signature")
|
||||
.ok_or_else(|| anyhow!("Missing bitvora-signature header"))?;
|
||||
|
||||
let mut mac = HmacSha256::new_from_slice(secret.as_bytes())?;
|
||||
mac.update(msg.body.as_slice());
|
||||
let result = mac.finalize().into_bytes();
|
||||
|
||||
if hex::encode(result) == *sig {
|
||||
return Ok(());
|
||||
} else {
|
||||
warn!("Invalid signature found {} != {}", sig, hex::encode(result));
|
||||
}
|
||||
|
||||
bail!("No valid signature found!");
|
||||
}
|
||||
|
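The Bitvora check above hex-encodes an HMAC-SHA256 of the raw webhook body and compares it with the bitvora-signature header; the matching value a sender would attach can be sketched as follows (secret and body are placeholders):

    fn example_bitvora_signature(secret: &str, raw_body: &[u8]) -> anyhow::Result<String> {
        use hmac::{Hmac, Mac};
        // same construction as verify_webhook: HMAC-SHA256 keyed with the webhook secret
        let mut mac = Hmac::<sha2::Sha256>::new_from_slice(secret.as_bytes())?;
        mac.update(raw_body);
        Ok(hex::encode(mac.finalize().into_bytes()))
    }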
@ -40,6 +40,7 @@ impl LightningNode for LndNode {
|
||||
Ok(AddInvoiceResult {
|
||||
pr: inner.payment_request,
|
||||
payment_hash: hex::encode(inner.r_hash),
|
||||
external_id: None,
|
||||
})
|
||||
}
|
||||
|
||||
@ -78,8 +79,8 @@ impl LightningNode for LndNode {
|
||||
Ok(m) => {
|
||||
if m.state == InvoiceState::Settled as i32 {
|
||||
InvoiceUpdate::Settled {
|
||||
settle_index: m.settle_index,
|
||||
payment_hash: hex::encode(m.r_hash),
|
||||
payment_hash: Some(hex::encode(m.r_hash)),
|
||||
external_id: None,
|
||||
}
|
||||
} else {
|
||||
InvoiceUpdate::Unknown
|
||||
|
@ -31,6 +31,7 @@ pub struct AddInvoiceRequest {
|
||||
pub struct AddInvoiceResult {
|
||||
pub pr: String,
|
||||
pub payment_hash: String,
|
||||
pub external_id: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
@ -39,8 +40,8 @@ pub enum InvoiceUpdate {
|
||||
Unknown,
|
||||
Error(String),
|
||||
Settled {
|
||||
payment_hash: String,
|
||||
settle_index: u64,
|
||||
payment_hash: Option<String>,
|
||||
external_id: Option<String>,
|
||||
},
|
||||
}
|
||||
|
||||
|
194 src/mocks.rs
@ -1,19 +1,14 @@
|
||||
#![allow(unused)]
|
||||
use crate::dns::{BasicRecord, DnsServer, RecordType};
|
||||
use crate::exchange::{ExchangeRateService, Ticker, TickerRate};
|
||||
use crate::host::{FullVmInfo, TimeSeries, TimeSeriesData, VmHostClient};
|
||||
use crate::host::{FullVmInfo, TerminalStream, TimeSeries, TimeSeriesData, VmHostClient, VmHostInfo};
|
||||
use crate::lightning::{AddInvoiceRequest, AddInvoiceResult, InvoiceUpdate, LightningNode};
|
||||
use crate::router::{ArpEntry, Router};
|
||||
use crate::settings::NetworkPolicy;
|
||||
use crate::status::{VmRunningState, VmState};
|
||||
use anyhow::{anyhow, bail, ensure, Context};
|
||||
use chrono::{DateTime, TimeDelta, Utc};
|
||||
use fedimint_tonic_lnd::tonic::codegen::tokio_stream::Stream;
|
||||
use lnvps_db::{
|
||||
async_trait, DiskInterface, DiskType, IpRange, LNVpsDb, OsDistribution, User, UserSshKey, Vm,
|
||||
VmCostPlan, VmCostPlanIntervalType, VmCustomPricing, VmCustomPricingDisk, VmCustomTemplate,
|
||||
VmHost, VmHostDisk, VmHostKind, VmHostRegion, VmIpAssignment, VmOsImage, VmPayment, VmTemplate,
|
||||
};
|
||||
use lnvps_db::{async_trait, AccessPolicy, DiskInterface, DiskType, IpRange, IpRangeAllocationMode, LNVpsDb, OsDistribution, User, UserSshKey, Vm, VmCostPlan, VmCostPlanIntervalType, VmCustomPricing, VmCustomPricingDisk, VmCustomTemplate, VmHost, VmHostDisk, VmHostKind, VmHostRegion, VmIpAssignment, VmOsImage, VmPayment, VmTemplate};
|
||||
use std::collections::HashMap;
|
||||
use std::ops::Add;
|
||||
use std::pin::Pin;
|
||||
@ -37,14 +32,11 @@ pub struct MockDb {
|
||||
pub custom_pricing_disk: Arc<Mutex<HashMap<u64, VmCustomPricingDisk>>>,
|
||||
pub custom_template: Arc<Mutex<HashMap<u64, VmCustomTemplate>>>,
|
||||
pub payments: Arc<Mutex<Vec<VmPayment>>>,
|
||||
pub router: Arc<Mutex<HashMap<u64, lnvps_db::Router>>>,
|
||||
pub access_policy: Arc<Mutex<HashMap<u64, AccessPolicy>>>,
|
||||
}
|
||||
|
||||
impl MockDb {
|
||||
pub const KB: u64 = 1024;
|
||||
pub const MB: u64 = Self::KB * 1024;
|
||||
pub const GB: u64 = Self::MB * 1024;
|
||||
pub const TB: u64 = Self::GB * 1024;
|
||||
|
||||
pub fn empty() -> MockDb {
|
||||
Self {
|
||||
..Default::default()
|
||||
@ -71,8 +63,8 @@ impl MockDb {
|
||||
created: Utc::now(),
|
||||
expires: None,
|
||||
cpu: 2,
|
||||
memory: Self::GB * 2,
|
||||
disk_size: Self::GB * 64,
|
||||
memory: crate::GB * 2,
|
||||
disk_size: crate::GB * 64,
|
||||
disk_type: DiskType::SSD,
|
||||
disk_interface: DiskInterface::PCIe,
|
||||
cost_plan_id: 1,
|
||||
@ -120,6 +112,19 @@ impl Default for MockDb {
|
||||
gateway: "10.0.0.1/8".to_string(),
|
||||
enabled: true,
|
||||
region_id: 1,
|
||||
..Default::default()
|
||||
},
|
||||
);
|
||||
ip_ranges.insert(
|
||||
2,
|
||||
IpRange {
|
||||
id: 2,
|
||||
cidr: "fd00::/64".to_string(),
|
||||
gateway: "fd00::1".to_string(),
|
||||
enabled: true,
|
||||
region_id: 1,
|
||||
allocation_mode: IpRangeAllocationMode::SlaacEui64,
|
||||
..Default::default()
|
||||
},
|
||||
);
|
||||
let mut hosts = HashMap::new();
|
||||
@ -132,10 +137,13 @@ impl Default for MockDb {
|
||||
name: "mock-host".to_string(),
|
||||
ip: "https://localhost".to_string(),
|
||||
cpu: 4,
|
||||
memory: 8 * Self::GB,
|
||||
memory: 8 * crate::GB,
|
||||
enabled: true,
|
||||
api_token: "".to_string(),
|
||||
load_factor: 1.5,
|
||||
load_cpu: 1.5,
|
||||
load_memory: 2.0,
|
||||
load_disk: 3.0,
|
||||
vlan_id: Some(100),
|
||||
},
|
||||
);
|
||||
let mut host_disks = HashMap::new();
|
||||
@ -145,7 +153,7 @@ impl Default for MockDb {
|
||||
id: 1,
|
||||
host_id: 1,
|
||||
name: "mock-disk".to_string(),
|
||||
size: Self::TB * 10,
|
||||
size: crate::TB * 10,
|
||||
kind: DiskType::SSD,
|
||||
interface: DiskInterface::PCIe,
|
||||
enabled: true,
|
||||
@ -184,6 +192,8 @@ impl Default for MockDb {
|
||||
user_ssh_keys: Arc::new(Mutex::new(Default::default())),
|
||||
custom_template: Arc::new(Default::default()),
|
||||
payments: Arc::new(Default::default()),
|
||||
router: Arc::new(Default::default()),
|
||||
access_policy: Arc::new(Default::default()),
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -209,6 +219,7 @@ impl LNVpsDb for MockDb {
|
||||
email: None,
|
||||
contact_nip17: false,
|
||||
contact_email: false,
|
||||
country_code: Some("USA".to_string()),
|
||||
},
|
||||
);
|
||||
Ok(max + 1)
|
||||
@ -269,11 +280,26 @@ impl LNVpsDb for MockDb {
|
||||
.collect())
|
||||
}
|
||||
|
||||
async fn list_host_region(&self) -> anyhow::Result<Vec<VmHostRegion>> {
|
||||
let regions = self.regions.lock().await;
|
||||
Ok(regions.values().filter(|r| r.enabled).cloned().collect())
|
||||
}
|
||||
|
||||
async fn get_host_region(&self, id: u64) -> anyhow::Result<VmHostRegion> {
|
||||
let regions = self.regions.lock().await;
|
||||
Ok(regions.get(&id).ok_or(anyhow!("no region"))?.clone())
|
||||
}
|
||||
|
||||
async fn get_host_region_by_name(&self, name: &str) -> anyhow::Result<VmHostRegion> {
|
||||
let regions = self.regions.lock().await;
|
||||
Ok(regions
|
||||
.iter()
|
||||
.find(|(_, v)| v.name == name)
|
||||
.ok_or(anyhow!("no region"))?
|
||||
.1
|
||||
.clone())
|
||||
}
|
||||
|
||||
async fn list_hosts(&self) -> anyhow::Result<Vec<VmHost>> {
|
||||
let hosts = self.hosts.lock().await;
|
||||
Ok(hosts.values().filter(|h| h.enabled).cloned().collect())
|
||||
@ -304,6 +330,16 @@ impl LNVpsDb for MockDb {
|
||||
Ok(disks.get(&disk_id).ok_or(anyhow!("no disk"))?.clone())
|
||||
}
|
||||
|
||||
async fn update_host_disk(&self, disk: &VmHostDisk) -> anyhow::Result<()> {
|
||||
let mut disks = self.host_disks.lock().await;
|
||||
if let Some(d) = disks.get_mut(&disk.id) {
|
||||
d.size = disk.size;
|
||||
d.kind = disk.kind;
|
||||
d.interface = disk.interface;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn get_os_image(&self, id: u64) -> anyhow::Result<VmOsImage> {
|
||||
let os_images = self.os_images.lock().await;
|
||||
Ok(os_images.get(&id).ok_or(anyhow!("no image"))?.clone())
|
||||
@ -458,7 +494,7 @@ impl LNVpsDb for MockDb {
|
||||
|
||||
async fn update_vm_ip_assignment(&self, ip_assignment: &VmIpAssignment) -> anyhow::Result<()> {
|
||||
let mut ip_assignments = self.ip_assignments.lock().await;
|
||||
if let Some(i) = ip_assignments.get_mut(&ip_assignment.vm_id) {
|
||||
if let Some(i) = ip_assignments.get_mut(&ip_assignment.id) {
|
||||
i.arp_ref = ip_assignment.arp_ref.clone();
|
||||
i.dns_forward = ip_assignment.dns_forward.clone();
|
||||
i.dns_reverse = ip_assignment.dns_reverse.clone();
|
||||
@ -518,11 +554,18 @@ impl LNVpsDb for MockDb {
|
||||
.clone())
|
||||
}
|
||||
|
||||
async fn get_vm_payment_by_ext_id(&self, id: &str) -> anyhow::Result<VmPayment> {
|
||||
let p = self.payments.lock().await;
|
||||
Ok(p.iter()
|
||||
.find(|p| p.external_id == Some(id.to_string()))
|
||||
.context("no vm_payment")?
|
||||
.clone())
|
||||
}
|
||||
|
||||
async fn update_vm_payment(&self, vm_payment: &VmPayment) -> anyhow::Result<()> {
|
||||
let mut p = self.payments.lock().await;
|
||||
if let Some(p) = p.iter_mut().find(|p| p.id == *vm_payment.id) {
|
||||
p.is_paid = vm_payment.is_paid.clone();
|
||||
p.settle_index = vm_payment.settle_index.clone();
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
@ -539,7 +582,8 @@ impl LNVpsDb for MockDb {
|
||||
async fn last_paid_invoice(&self) -> anyhow::Result<Option<VmPayment>> {
|
||||
let p = self.payments.lock().await;
|
||||
Ok(p.iter()
|
||||
.max_by(|a, b| a.settle_index.cmp(&b.settle_index))
|
||||
.filter(|p| p.is_paid)
|
||||
.max_by(|a, b| a.created.cmp(&b.created))
|
||||
.map(|v| v.clone()))
|
||||
}
|
||||
|
||||
@ -581,27 +625,41 @@ impl LNVpsDb for MockDb {
|
||||
.cloned()
|
||||
.collect())
|
||||
}
|
||||
|
||||
async fn get_router(&self, router_id: u64) -> anyhow::Result<lnvps_db::Router> {
|
||||
let r = self.router.lock().await;
|
||||
Ok(r.get(&router_id).cloned().context("no router")?)
|
||||
}
|
||||
|
||||
async fn get_access_policy(&self, access_policy_id: u64) -> anyhow::Result<AccessPolicy> {
|
||||
let p = self.access_policy.lock().await;
|
||||
Ok(p.get(&access_policy_id)
|
||||
.cloned()
|
||||
.context("no access policy")?)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct MockRouter {
|
||||
pub policy: NetworkPolicy,
|
||||
arp: Arc<Mutex<HashMap<u64, ArpEntry>>>,
|
||||
}
|
||||
|
||||
impl MockRouter {
|
||||
pub fn new(policy: NetworkPolicy) -> Self {
|
||||
pub fn new() -> Self {
|
||||
static LAZY_ARP: LazyLock<Arc<Mutex<HashMap<u64, ArpEntry>>>> =
|
||||
LazyLock::new(|| Arc::new(Mutex::new(HashMap::new())));
|
||||
|
||||
Self {
|
||||
policy,
|
||||
arp: LAZY_ARP.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
#[async_trait]
|
||||
impl Router for MockRouter {
|
||||
async fn generate_mac(&self, ip: &str, comment: &str) -> anyhow::Result<Option<ArpEntry>> {
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
async fn list_arp_entry(&self) -> anyhow::Result<Vec<ArpEntry>> {
|
||||
let arp = self.arp.lock().await;
|
||||
Ok(arp.values().cloned().collect())
|
||||
@ -642,14 +700,15 @@ impl Router for MockRouter {
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct MockNode {
|
||||
invoices: Arc<Mutex<HashMap<String, MockInvoice>>>,
|
||||
pub invoices: Arc<Mutex<HashMap<String, MockInvoice>>>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
struct MockInvoice {
|
||||
pr: String,
|
||||
expiry: DateTime<Utc>,
|
||||
settle_index: u64,
|
||||
pub struct MockInvoice {
|
||||
pub pr: String,
|
||||
pub amount: u64,
|
||||
pub expiry: DateTime<Utc>,
|
||||
pub is_paid: bool,
|
||||
}
|
||||
|
||||
impl MockNode {
|
||||
@ -665,7 +724,23 @@ impl MockNode {
|
||||
#[async_trait]
|
||||
impl LightningNode for MockNode {
|
||||
async fn add_invoice(&self, req: AddInvoiceRequest) -> anyhow::Result<AddInvoiceResult> {
|
||||
todo!()
|
||||
let mut invoices = self.invoices.lock().await;
|
||||
let id: [u8; 32] = rand::random();
|
||||
let hex_id = hex::encode(id);
|
||||
invoices.insert(
|
||||
hex_id.clone(),
|
||||
MockInvoice {
|
||||
pr: format!("lnrt1{}", hex_id),
|
||||
amount: req.amount,
|
||||
expiry: Utc::now().add(TimeDelta::seconds(req.expire.unwrap_or(3600) as i64)),
|
||||
is_paid: false,
|
||||
},
|
||||
);
|
||||
Ok(AddInvoiceResult {
|
||||
pr: format!("lnrt1{}", hex_id),
|
||||
payment_hash: hex_id.clone(),
|
||||
external_id: None,
|
||||
})
|
||||
}
|
||||
|
||||
async fn subscribe_invoices(
|
||||
@ -698,6 +773,10 @@ impl MockVmHost {
|
||||
|
||||
#[async_trait]
|
||||
impl VmHostClient for MockVmHost {
|
||||
async fn get_info(&self) -> anyhow::Result<VmHostInfo> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn download_os_image(&self, image: &VmOsImage) -> anyhow::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
@ -747,6 +826,10 @@ impl VmHostClient for MockVmHost {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn reinstall_vm(&self, cfg: &FullVmInfo) -> anyhow::Result<()> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn get_vm_state(&self, vm: &Vm) -> anyhow::Result<VmState> {
|
||||
let vms = self.vms.lock().await;
|
||||
if let Some(vm) = vms.get(&vm.id) {
|
||||
@ -777,11 +860,14 @@ impl VmHostClient for MockVmHost {
|
||||
) -> anyhow::Result<Vec<TimeSeriesData>> {
|
||||
Ok(vec![])
|
||||
}
|
||||
|
||||
async fn connect_terminal(&self, vm: &Vm) -> anyhow::Result<TerminalStream> {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
pub struct MockDnsServer {
|
||||
pub forward: Arc<Mutex<HashMap<String, MockDnsEntry>>>,
|
||||
pub reverse: Arc<Mutex<HashMap<String, MockDnsEntry>>>,
|
||||
pub zones: Arc<Mutex<HashMap<String, HashMap<String, MockDnsEntry>>>>,
|
||||
}
|
||||
|
||||
pub struct MockDnsEntry {
|
||||
@ -792,25 +878,25 @@ pub struct MockDnsEntry {
|
||||
|
||||
impl MockDnsServer {
|
||||
pub fn new() -> Self {
|
||||
static LAZY_FWD: LazyLock<Arc<Mutex<HashMap<String, MockDnsEntry>>>> =
|
||||
LazyLock::new(|| Arc::new(Mutex::new(HashMap::new())));
|
||||
static LAZY_REV: LazyLock<Arc<Mutex<HashMap<String, MockDnsEntry>>>> =
|
||||
static LAZY_ZONES: LazyLock<Arc<Mutex<HashMap<String, HashMap<String, MockDnsEntry>>>>> =
|
||||
LazyLock::new(|| Arc::new(Mutex::new(HashMap::new())));
|
||||
Self {
|
||||
forward: LAZY_FWD.clone(),
|
||||
reverse: LAZY_REV.clone(),
|
||||
zones: LAZY_ZONES.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
#[async_trait]
|
||||
impl DnsServer for MockDnsServer {
|
||||
async fn add_record(&self, record: &BasicRecord) -> anyhow::Result<BasicRecord> {
|
||||
let mut table = match record.kind {
|
||||
RecordType::PTR => self.reverse.lock().await,
|
||||
_ => self.forward.lock().await,
|
||||
async fn add_record(&self, zone_id: &str, record: &BasicRecord) -> anyhow::Result<BasicRecord> {
|
||||
let mut zones = self.zones.lock().await;
|
||||
let table = if let Some(t) = zones.get_mut(zone_id) {
|
||||
t
|
||||
} else {
|
||||
zones.insert(zone_id.to_string(), HashMap::new());
|
||||
zones.get_mut(zone_id).unwrap()
|
||||
};
|
||||
|
||||
if table.values().any(|v| v.name == record.name) {
|
||||
if table.values().any(|v| v.name == record.name && v.kind == record.kind.to_string()) {
|
||||
bail!("Duplicate record with name {}", record.name);
|
||||
}
|
||||
|
||||
@ -835,20 +921,30 @@ impl DnsServer for MockDnsServer {
|
||||
})
|
||||
}
|
||||
|
||||
async fn delete_record(&self, record: &BasicRecord) -> anyhow::Result<()> {
|
||||
let mut table = match record.kind {
|
||||
RecordType::PTR => self.reverse.lock().await,
|
||||
_ => self.forward.lock().await,
|
||||
async fn delete_record(&self, zone_id: &str, record: &BasicRecord) -> anyhow::Result<()> {
|
||||
let mut zones = self.zones.lock().await;
|
||||
let table = if let Some(t) = zones.get_mut(zone_id) {
|
||||
t
|
||||
} else {
|
||||
zones.insert(zone_id.to_string(), HashMap::new());
|
||||
zones.get_mut(zone_id).unwrap()
|
||||
};
|
||||
ensure!(record.id.is_some(), "Id is missing");
|
||||
table.remove(record.id.as_ref().unwrap());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn update_record(&self, record: &BasicRecord) -> anyhow::Result<BasicRecord> {
|
||||
let mut table = match record.kind {
|
||||
RecordType::PTR => self.reverse.lock().await,
|
||||
_ => self.forward.lock().await,
|
||||
async fn update_record(
|
||||
&self,
|
||||
zone_id: &str,
|
||||
record: &BasicRecord,
|
||||
) -> anyhow::Result<BasicRecord> {
|
||||
let mut zones = self.zones.lock().await;
|
||||
let table = if let Some(t) = zones.get_mut(zone_id) {
|
||||
t
|
||||
} else {
|
||||
zones.insert(zone_id.to_string(), HashMap::new());
|
||||
zones.get_mut(zone_id).unwrap()
|
||||
};
|
||||
ensure!(record.id.is_some(), "Id is missing");
|
||||
if let Some(mut r) = table.get_mut(record.id.as_ref().unwrap()) {
|
||||
|
@ -98,7 +98,7 @@ impl<'r> FromRequest<'r> for Nip98Auth {
|
||||
}
|
||||
let auth = Nip98Auth::from_base64(&auth[6..]).unwrap();
|
||||
match auth.check(
|
||||
request.uri().to_string().as_str(),
|
||||
request.uri().path().to_string().as_str(),
|
||||
request.method().as_str(),
|
||||
) {
|
||||
Ok(_) => Outcome::Success(auth),
|
||||
|
87 src/payments/invoice.rs (new file)
@ -0,0 +1,87 @@
|
||||
use crate::lightning::{InvoiceUpdate, LightningNode};
|
||||
use crate::worker::WorkJob;
|
||||
use anyhow::Result;
|
||||
use lnvps_db::{LNVpsDb, VmPayment};
|
||||
use log::{error, info, warn};
|
||||
use nostr::util::hex;
|
||||
use rocket::futures::StreamExt;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::mpsc::UnboundedSender;
|
||||
|
||||
pub struct NodeInvoiceHandler {
|
||||
node: Arc<dyn LightningNode>,
|
||||
db: Arc<dyn LNVpsDb>,
|
||||
tx: UnboundedSender<WorkJob>,
|
||||
}
|
||||
|
||||
impl NodeInvoiceHandler {
|
||||
pub fn new(
|
||||
node: Arc<dyn LightningNode>,
|
||||
db: Arc<dyn LNVpsDb>,
|
||||
tx: UnboundedSender<WorkJob>,
|
||||
) -> Self {
|
||||
Self { node, tx, db }
|
||||
}
|
||||
|
||||
async fn mark_paid(&self, id: &Vec<u8>) -> Result<()> {
|
||||
let p = self.db.get_vm_payment(id).await?;
|
||||
self.mark_payment_paid(&p).await
|
||||
}
|
||||
|
||||
async fn mark_paid_ext_id(&self, external_id: &str) -> Result<()> {
|
||||
let p = self.db.get_vm_payment_by_ext_id(external_id).await?;
|
||||
self.mark_payment_paid(&p).await
|
||||
}
|
||||
|
||||
async fn mark_payment_paid(&self, payment: &VmPayment) -> Result<()> {
|
||||
self.db.vm_payment_paid(&payment).await?;
|
||||
|
||||
info!(
|
||||
"VM payment {} for {}, paid",
|
||||
hex::encode(&payment.id),
|
||||
payment.vm_id
|
||||
);
|
||||
self.tx.send(WorkJob::CheckVm {
|
||||
vm_id: payment.vm_id,
|
||||
})?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn listen(&mut self) -> Result<()> {
|
||||
let from_ph = self.db.last_paid_invoice().await?.map(|i| i.id.clone());
|
||||
info!(
|
||||
"Listening for invoices from {}",
|
||||
from_ph
|
||||
.as_ref()
|
||||
.map(hex::encode)
|
||||
.unwrap_or("NOW".to_string())
|
||||
);
|
||||
|
||||
let mut handler = self.node.subscribe_invoices(from_ph).await?;
|
||||
while let Some(msg) = handler.next().await {
|
||||
match msg {
|
||||
InvoiceUpdate::Settled {
|
||||
payment_hash,
|
||||
external_id,
|
||||
} => {
|
||||
if let Some(h) = payment_hash {
|
||||
let r_hash = hex::decode(h)?;
|
||||
if let Err(e) = self.mark_paid(&r_hash).await {
|
||||
error!("{}", e);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
if let Some(e) = external_id {
|
||||
if let Err(e) = self.mark_paid_ext_id(&e).await {
|
||||
error!("{}", e);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
}
|
||||
v => warn!("Unknown invoice update: {:?}", v),
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
55 src/payments/mod.rs (new file)
@ -0,0 +1,55 @@
|
||||
use crate::lightning::LightningNode;
|
||||
use crate::payments::invoice::NodeInvoiceHandler;
|
||||
use crate::settings::Settings;
|
||||
use crate::worker::WorkJob;
|
||||
use anyhow::Result;
|
||||
use lnvps_db::LNVpsDb;
|
||||
use log::error;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use tokio::sync::mpsc::UnboundedSender;
|
||||
use tokio::time::sleep;
|
||||
|
||||
mod invoice;
|
||||
#[cfg(feature = "revolut")]
|
||||
mod revolut;
|
||||
|
||||
pub fn listen_all_payments(
|
||||
settings: &Settings,
|
||||
node: Arc<dyn LightningNode>,
|
||||
db: Arc<dyn LNVpsDb>,
|
||||
sender: UnboundedSender<WorkJob>,
|
||||
) -> Result<()> {
|
||||
let mut handler = NodeInvoiceHandler::new(node.clone(), db.clone(), sender.clone());
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
if let Err(e) = handler.listen().await {
|
||||
error!("invoice-error: {}", e);
|
||||
}
|
||||
sleep(Duration::from_secs(30)).await;
|
||||
}
|
||||
});
|
||||
|
||||
#[cfg(feature = "revolut")]
|
||||
{
|
||||
use crate::payments::revolut::RevolutPaymentHandler;
|
||||
if let Some(r) = &settings.revolut {
|
||||
let mut handler = RevolutPaymentHandler::new(
|
||||
r.clone(),
|
||||
&settings.public_url,
|
||||
db.clone(),
|
||||
sender.clone(),
|
||||
)?;
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
if let Err(e) = handler.listen().await {
|
||||
error!("revolut-error: {}", e);
|
||||
}
|
||||
sleep(Duration::from_secs(30)).await;
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
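listen_all_payments is the single startup entry point: it always spawns the Lightning invoice listener and, when the revolut feature and config are present, the Revolut handler too. A hedged sketch of the call site (variable names are illustrative):

    let (tx, rx) = tokio::sync::mpsc::unbounded_channel::<WorkJob>();
    listen_all_payments(&settings, node.clone(), db.clone(), tx)?;
    // rx is drained by the worker loop, which picks up WorkJob::CheckVm once a payment settles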
156 src/payments/revolut.rs (new file)
@ -0,0 +1,156 @@
|
||||
use crate::api::{WebhookMessage, WEBHOOK_BRIDGE};
|
||||
use crate::fiat::{RevolutApi, RevolutWebhookEvent};
|
||||
use crate::settings::RevolutConfig;
|
||||
use crate::worker::WorkJob;
|
||||
use anyhow::{anyhow, bail, Context, Result};
|
||||
use hmac::{Hmac, Mac};
|
||||
use isocountry::CountryCode;
|
||||
use lnvps_db::LNVpsDb;
|
||||
use log::{error, info, warn};
|
||||
use reqwest::Url;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::mpsc::UnboundedSender;
|
||||
|
||||
pub struct RevolutPaymentHandler {
|
||||
api: RevolutApi,
|
||||
db: Arc<dyn LNVpsDb>,
|
||||
sender: UnboundedSender<WorkJob>,
|
||||
public_url: String,
|
||||
}
|
||||
|
||||
impl RevolutPaymentHandler {
|
||||
pub fn new(
|
||||
settings: RevolutConfig,
|
||||
public_url: &str,
|
||||
db: Arc<dyn LNVpsDb>,
|
||||
sender: UnboundedSender<WorkJob>,
|
||||
) -> Result<Self> {
|
||||
Ok(Self {
|
||||
api: RevolutApi::new(settings)?,
|
||||
public_url: public_url.to_string(),
|
||||
db,
|
||||
sender,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn listen(&mut self) -> Result<()> {
|
||||
let this_webhook = Url::parse(&self.public_url)?.join("/api/v1/webhook/revolut")?;
|
||||
let webhooks = self.api.list_webhooks().await?;
|
||||
for wh in webhooks {
|
||||
info!("Deleting old webhook: {} {}", wh.id, wh.url);
|
||||
self.api.delete_webhook(&wh.id).await?
|
||||
}
|
||||
info!("Setting up webhook for '{}'", this_webhook);
|
||||
let wh = self
|
||||
.api
|
||||
.create_webhook(
|
||||
this_webhook.as_str(),
|
||||
vec![
|
||||
RevolutWebhookEvent::OrderCompleted,
|
||||
RevolutWebhookEvent::OrderAuthorised,
|
||||
],
|
||||
)
|
||||
.await?;
|
||||
|
||||
let secret = wh.signing_secret.context("Signing secret is missing")?;
|
||||
// listen to events
|
||||
let mut listenr = WEBHOOK_BRIDGE.listen();
|
||||
while let Ok(m) = listenr.recv().await {
|
||||
if m.endpoint != "/api/v1/webhook/revolut" {
|
||||
continue;
|
||||
}
|
||||
let body: RevolutWebhook = serde_json::from_slice(m.body.as_slice())?;
|
||||
info!("Received webhook {:?}", body);
|
||||
if let Err(e) = verify_webhook(&secret, &m) {
|
||||
error!("Signature verification failed: {}", e);
|
||||
continue;
|
||||
}
|
||||
|
||||
if let RevolutWebhookEvent::OrderCompleted = body.event {
|
||||
if let Err(e) = self.try_complete_payment(&body.order_id).await {
|
||||
error!("Failed to complete order: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn try_complete_payment(&self, ext_id: &str) -> Result<()> {
|
||||
let mut p = self.db.get_vm_payment_by_ext_id(ext_id).await?;
|
||||
|
||||
// save payment state json into external_data
|
||||
// TODO: encrypt payment_data
|
||||
let order = self.api.get_order(ext_id).await?;
|
||||
p.external_data = serde_json::to_string(&order)?;
|
||||
|
||||
// check user country matches card country
|
||||
if let Some(cc) = order
|
||||
.payments
|
||||
.and_then(|p| p.first().cloned())
|
||||
.and_then(|p| p.payment_method)
|
||||
.and_then(|p| p.card_country_code)
|
||||
.and_then(|c| CountryCode::for_alpha2(&c).ok())
|
||||
{
|
||||
let vm = self.db.get_vm(p.vm_id).await?;
|
||||
let mut user = self.db.get_user(vm.user_id).await?;
|
||||
if user.country_code.is_none() {
|
||||
// update user country code to match card country
|
||||
user.country_code = Some(cc.alpha3().to_string());
|
||||
self.db.update_user(&user).await?;
|
||||
}
|
||||
}
|
||||
|
||||
self.db.vm_payment_paid(&p).await?;
|
||||
self.sender.send(WorkJob::CheckVm { vm_id: p.vm_id })?;
|
||||
info!("VM payment {} for {}, paid", hex::encode(p.id), p.vm_id);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
type HmacSha256 = Hmac<sha2::Sha256>;
|
||||
fn verify_webhook(secret: &str, msg: &WebhookMessage) -> Result<()> {
|
||||
let sig = msg
|
||||
.headers
|
||||
.get("revolut-signature")
|
||||
.ok_or_else(|| anyhow!("Missing Revolut-Signature header"))?;
|
||||
let timestamp = msg
|
||||
.headers
|
||||
.get("revolut-request-timestamp")
|
||||
.ok_or_else(|| anyhow!("Missing Revolut-Request-Timestamp header"))?;
|
||||
|
||||
// check if any signatures match
|
||||
for sig in sig.split(",") {
|
||||
let mut sig_split = sig.split("=");
|
||||
let (version, code) = (
|
||||
sig_split.next().context("Invalid signature format")?,
|
||||
sig_split.next().context("Invalid signature format")?,
|
||||
);
|
||||
let mut mac = HmacSha256::new_from_slice(secret.as_bytes())?;
|
||||
mac.update(version.as_bytes());
|
||||
mac.update(b".");
|
||||
mac.update(timestamp.as_bytes());
|
||||
mac.update(b".");
|
||||
mac.update(msg.body.as_slice());
|
||||
let result = mac.finalize().into_bytes();
|
||||
|
||||
if hex::encode(result) == code {
|
||||
return Ok(());
|
||||
} else {
|
||||
warn!(
|
||||
"Invalid signature found {} != {}",
|
||||
code,
|
||||
hex::encode(result)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
bail!("No valid signature found!");
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Deserialize, Serialize)]
|
||||
struct RevolutWebhook {
|
||||
pub event: RevolutWebhookEvent,
|
||||
pub order_id: String,
|
||||
pub merchant_order_ext_ref: Option<String>,
|
||||
}
|
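The Revolut verification signs "version.timestamp.raw-body" and accepts any matching entry from the comma-separated Revolut-Signature header; building one expected entry could look like this (secret, timestamp and body are placeholders, and the "v1" version tag is assumed):

    fn example_revolut_signature(secret: &str, timestamp: &str, raw_body: &[u8]) -> anyhow::Result<String> {
        use hmac::{Hmac, Mac};
        // payload is "<version>.<timestamp>.<raw body>", matching verify_webhook above
        let mut mac = Hmac::<sha2::Sha256>::new_from_slice(secret.as_bytes())?;
        mac.update(b"v1");
        mac.update(b".");
        mac.update(timestamp.as_bytes());
        mac.update(b".");
        mac.update(raw_body);
        Ok(format!("v1={}", hex::encode(mac.finalize().into_bytes())))
    }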
@ -2,8 +2,10 @@ use crate::provisioner::Template;
|
||||
use anyhow::{bail, Result};
|
||||
use chrono::Utc;
|
||||
use futures::future::join_all;
|
||||
use ipnetwork::{IpNetwork, NetworkSize};
|
||||
use lnvps_db::{
|
||||
DiskInterface, DiskType, LNVpsDb, VmCustomTemplate, VmHost, VmHostDisk, VmTemplate,
|
||||
DiskInterface, DiskType, IpRange, LNVpsDb, VmCustomTemplate, VmHost, VmHostDisk,
|
||||
VmIpAssignment, VmTemplate,
|
||||
};
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
@ -80,8 +82,25 @@ impl HostCapacityService {
|
||||
disk_interface: Option<DiskInterface>,
|
||||
) -> Result<HostCapacity> {
|
||||
let vms = self.db.list_vms_on_host(host.id).await?;
|
||||
|
||||
// load ip ranges
|
||||
let ip_ranges = self.db.list_ip_range_in_region(host.region_id).await?;
|
||||
// TODO: handle very large number of assignments, maybe just count assignments
|
||||
let ip_range_assigned: Vec<VmIpAssignment> = join_all(
|
||||
ip_ranges
|
||||
.iter()
|
||||
.map(|r| self.db.list_vm_ip_assignments_in_range(r.id)),
|
||||
)
|
||||
.await
|
||||
.into_iter()
|
||||
.filter_map(|r| r.ok())
|
||||
.flatten()
|
||||
.collect();
|
||||
|
||||
// TODO: filter disks from DB? Should be very few disks anyway
|
||||
let storage = self.db.list_host_disks(host.id).await?;
|
||||
|
||||
// load templates
|
||||
let templates = self.db.list_vm_templates().await?;
|
||||
let custom_templates: Vec<Result<VmCustomTemplate>> = join_all(
|
||||
vms.iter()
|
||||
@ -145,10 +164,10 @@ impl HostCapacityService {
|
||||
.map(|s| {
|
||||
let usage = vm_resources
|
||||
.iter()
|
||||
.filter(|(k, v)| s.id == v.disk_id)
|
||||
.fold(0, |acc, (k, v)| acc + v.disk);
|
||||
.filter(|(_k, v)| s.id == v.disk_id)
|
||||
.fold(0, |acc, (_k, v)| acc + v.disk);
|
||||
DiskCapacity {
|
||||
load_factor: host.load_factor,
|
||||
load_factor: host.load_disk,
|
||||
disk: s.clone(),
|
||||
usage,
|
||||
}
|
||||
@ -161,19 +180,40 @@ impl HostCapacityService {
|
||||
let memory_consumed = vm_resources.values().fold(0, |acc, vm| acc + vm.memory);
|
||||
|
||||
Ok(HostCapacity {
|
||||
load_factor: host.load_factor,
|
||||
load_factor: LoadFactors {
|
||||
cpu: host.load_cpu,
|
||||
memory: host.load_memory,
|
||||
disk: host.load_disk,
|
||||
},
|
||||
host: host.clone(),
|
||||
cpu: cpu_consumed,
|
||||
memory: memory_consumed,
|
||||
disks: storage_disks,
|
||||
ranges: ip_ranges
|
||||
.into_iter()
|
||||
.map(|r| IPRangeCapacity {
|
||||
usage: ip_range_assigned
|
||||
.iter()
|
||||
.filter(|z| z.ip_range_id == r.id)
|
||||
.count() as u128,
|
||||
range: r,
|
||||
})
|
||||
.collect(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct LoadFactors {
|
||||
pub cpu: f32,
|
||||
pub memory: f32,
|
||||
pub disk: f32,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct HostCapacity {
|
||||
/// Load factor applied to resource consumption
|
||||
pub load_factor: f32,
|
||||
pub load_factor: LoadFactors,
|
||||
/// The host
|
||||
pub host: VmHost,
|
||||
/// Number of consumed CPU cores
|
||||
@ -182,6 +222,8 @@ pub struct HostCapacity {
|
||||
pub memory: u64,
|
||||
/// List of disks on the host and its used space
|
||||
pub disks: Vec<DiskCapacity>,
|
||||
/// List of IP ranges and its usage
|
||||
pub ranges: Vec<IPRangeCapacity>,
|
||||
}
|
||||
|
||||
impl HostCapacity {
|
||||
@ -192,23 +234,24 @@ impl HostCapacity {
|
||||
|
||||
/// CPU usage as a percentage
|
||||
pub fn cpu_load(&self) -> f32 {
|
||||
self.cpu as f32 / (self.host.cpu as f32 * self.load_factor)
|
||||
self.cpu as f32 / (self.host.cpu as f32 * self.load_factor.cpu)
|
||||
}
|
||||
|
||||
/// Total number of available CPUs
|
||||
pub fn available_cpu(&self) -> u16 {
|
||||
let loaded_host_cpu = (self.host.cpu as f32 * self.load_factor).floor() as u16;
|
||||
let loaded_host_cpu = (self.host.cpu as f32 * self.load_factor.cpu).floor() as u16;
|
||||
loaded_host_cpu.saturating_sub(self.cpu)
|
||||
}
|
||||
|
||||
/// Memory usage as a percentage
|
||||
pub fn memory_load(&self) -> f32 {
|
||||
self.memory as f32 / (self.host.memory as f32 * self.load_factor)
|
||||
self.memory as f32 / (self.host.memory as f32 * self.load_factor.memory)
|
||||
}
|
||||
|
||||
/// Total available bytes of memory
|
||||
pub fn available_memory(&self) -> u64 {
|
||||
let loaded_host_memory = (self.host.memory as f64 * self.load_factor as f64).floor() as u64;
|
||||
let loaded_host_memory =
|
||||
(self.host.memory as f64 * self.load_factor.memory as f64).floor() as u64;
|
||||
loaded_host_memory.saturating_sub(self.memory)
|
||||
}
|
||||
|
||||
@ -225,6 +268,7 @@ impl HostCapacity {
|
||||
.disks
|
||||
.iter()
|
||||
.any(|d| d.available_capacity() >= template.disk_size())
|
||||
&& self.ranges.iter().any(|r| r.available_capacity() >= 1)
|
||||
}
|
||||
}
|
||||
|
||||
@ -251,6 +295,30 @@ impl DiskCapacity {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct IPRangeCapacity {
|
||||
/// IP Range
|
||||
pub range: IpRange,
|
||||
/// Number of allocated IPs
|
||||
pub usage: u128,
|
||||
}
|
||||
|
||||
impl IPRangeCapacity {
|
||||
// first/last/gw
|
||||
const RESERVED: u128 = 3;
|
||||
|
||||
/// Total number of IPs free
|
||||
pub fn available_capacity(&self) -> u128 {
|
||||
let net: IpNetwork = self.range.cidr.parse().unwrap();
|
||||
|
||||
match net.size() {
|
||||
NetworkSize::V4(s) => (s as u128).saturating_sub(self.usage),
|
||||
NetworkSize::V6(s) => s.saturating_sub(self.usage),
|
||||
}
|
||||
.saturating_sub(Self::RESERVED)
|
||||
}
|
||||
}
|
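// A minimal sketch (not part of the diff) of the reservation arithmetic above, using
// the same numbers as the test below: a /24 holds 256 addresses, 69 are already
// assigned and 3 are reserved (network, broadcast, gateway), so 184 remain free.
fn example_range_capacity() -> u128 {
    let size: u128 = 256;   // addresses in a /24
    let usage: u128 = 69;   // IPs already assigned in the range
    let reserved: u128 = 3; // matches IPRangeCapacity::RESERVED (first/last/gateway)
    size.saturating_sub(usage).saturating_sub(reserved) // = 184
}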
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
@ -259,7 +327,11 @@ mod tests {
|
||||
#[test]
|
||||
fn loads() {
|
||||
let cap = HostCapacity {
|
||||
load_factor: 2.0,
|
||||
load_factor: LoadFactors {
|
||||
cpu: 2.0,
|
||||
memory: 3.0,
|
||||
disk: 4.0,
|
||||
},
|
||||
host: VmHost {
|
||||
cpu: 100,
|
||||
memory: 100,
|
||||
@ -268,23 +340,41 @@ mod tests {
|
||||
cpu: 8,
|
||||
memory: 8,
|
||||
disks: vec![DiskCapacity {
|
||||
load_factor: 2.0,
|
||||
load_factor: 4.0,
|
||||
disk: VmHostDisk {
|
||||
size: 100,
|
||||
..Default::default()
|
||||
},
|
||||
usage: 8,
|
||||
}],
|
||||
ranges: vec![IPRangeCapacity {
|
||||
range: IpRange {
|
||||
id: 1,
|
||||
cidr: "10.0.0.0/24".to_string(),
|
||||
gateway: "10.0.0.1".to_string(),
|
||||
enabled: true,
|
||||
region_id: 1,
|
||||
..Default::default()
|
||||
},
|
||||
usage: 69,
|
||||
}],
|
||||
};
|
||||
|
||||
// load factor halves load values 8/100 * (1/load_factor)
|
||||
assert_eq!(cap.load(), 0.04);
|
||||
assert_eq!(cap.cpu_load(), 0.04);
|
||||
assert_eq!(cap.memory_load(), 0.04);
|
||||
assert_eq!(cap.disk_load(), 0.04);
|
||||
// load factor doubles memory to 200, 200 - 8
|
||||
assert_eq!(cap.available_memory(), 192);
|
||||
assert_eq!(cap.cpu_load(), 8.0 / 200.0);
|
||||
assert_eq!(cap.memory_load(), 8.0 / 300.0);
|
||||
assert_eq!(cap.disk_load(), 8.0 / 400.0);
|
||||
assert_eq!(
|
||||
cap.load(),
|
||||
((8.0 / 200.0) + (8.0 / 300.0) + (8.0 / 400.0)) / 3.0
|
||||
);
|
||||
// the 3.0 memory load factor scales memory to 300, 300 - 8
|
||||
assert_eq!(cap.available_memory(), 292);
|
||||
assert_eq!(cap.available_cpu(), 192);
|
||||
for r in cap.ranges {
|
||||
assert_eq!(r.usage, 69);
|
||||
assert_eq!(r.available_capacity(), 256 - 3 - 69);
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
|
@ -1,18 +1,26 @@
|
||||
use crate::dns::{BasicRecord, DnsServer};
|
||||
use crate::exchange::{ExchangeRateService, Ticker};
|
||||
use crate::exchange::{Currency, CurrencyAmount, ExchangeRateService};
|
||||
use crate::fiat::FiatPaymentService;
|
||||
use crate::host::{get_host_client, FullVmInfo};
|
||||
use crate::lightning::{AddInvoiceRequest, LightningNode};
|
||||
use crate::provisioner::{
|
||||
CostResult, HostCapacityService, NetworkProvisioner, PricingEngine, ProvisionerMethod,
|
||||
AvailableIp, CostResult, HostCapacityService, NetworkProvisioner, PricingEngine,
|
||||
};
|
||||
use crate::router::{ArpEntry, Router};
|
||||
use crate::settings::{NetworkAccessPolicy, NetworkPolicy, ProvisionerConfig, Settings};
|
||||
use crate::router::{ArpEntry, MikrotikRouter, OvhDedicatedServerVMacRouter, Router};
|
||||
use crate::settings::{ProvisionerConfig, Settings};
|
||||
use anyhow::{bail, ensure, Context, Result};
|
||||
use chrono::{Days, Months, Utc};
|
||||
use lnvps_db::{DiskType, LNVpsDb, Vm, VmCostPlanIntervalType, VmCustomTemplate, VmIpAssignment, VmPayment};
|
||||
use chrono::Utc;
|
||||
use ipnetwork::IpNetwork;
|
||||
use isocountry::CountryCode;
|
||||
use lnvps_db::{
|
||||
AccessPolicy, IpRangeAllocationMode, LNVpsDb, NetworkAccessPolicy, PaymentMethod, RouterKind,
|
||||
Vm, VmCustomTemplate, VmHost, VmIpAssignment, VmPayment,
|
||||
};
|
||||
use log::{info, warn};
|
||||
use nostr::util::hex;
|
||||
use std::collections::HashMap;
|
||||
use std::ops::Add;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
@ -25,11 +33,14 @@ pub struct LNVpsProvisioner {
|
||||
db: Arc<dyn LNVpsDb>,
|
||||
node: Arc<dyn LightningNode>,
|
||||
rates: Arc<dyn ExchangeRateService>,
|
||||
tax_rates: HashMap<CountryCode, f32>,
|
||||
|
||||
router: Option<Arc<dyn Router>>,
|
||||
dns: Option<Arc<dyn DnsServer>>,
|
||||
revolut: Option<Arc<dyn FiatPaymentService>>,
|
||||
|
||||
network_policy: NetworkPolicy,
|
||||
/// Forward zone ID used for all VMs
|
||||
/// passed to the DNSServer type
|
||||
forward_zone_id: Option<String>,
|
||||
provisioner_config: ProvisionerConfig,
|
||||
}
|
||||
|
||||
@ -44,61 +55,98 @@ impl LNVpsProvisioner {
|
||||
db,
|
||||
node,
|
||||
rates,
|
||||
router: settings.get_router().expect("router config"),
|
||||
dns: settings.get_dns().expect("dns config"),
|
||||
network_policy: settings.network_policy,
|
||||
revolut: settings.get_revolut().expect("revolut config"),
|
||||
tax_rates: settings.tax_rate,
|
||||
provisioner_config: settings.provisioner,
|
||||
read_only: settings.read_only,
|
||||
forward_zone_id: settings.dns.map(|z| z.forward_zone_id),
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_router(&self, router_id: u64) -> Result<Arc<dyn Router>> {
|
||||
#[cfg(test)]
|
||||
return Ok(Arc::new(crate::mocks::MockRouter::new()));
|
||||
|
||||
let cfg = self.db.get_router(router_id).await?;
|
||||
match cfg.kind {
|
||||
RouterKind::Mikrotik => {
|
||||
let mut t_split = cfg.token.split(":");
|
||||
let (username, password) = (
|
||||
t_split.next().context("Invalid username:password")?,
|
||||
t_split.next().context("Invalid username:password")?,
|
||||
);
|
||||
Ok(Arc::new(MikrotikRouter::new(&cfg.url, username, password)))
|
||||
}
|
||||
RouterKind::OvhAdditionalIp => Ok(Arc::new(
|
||||
OvhDedicatedServerVMacRouter::new(&cfg.url, &cfg.name, &cfg.token).await?,
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create or Update access policy for a given ip assignment, does not save to database!
|
||||
pub async fn update_access_policy(&self, assignment: &mut VmIpAssignment) -> Result<()> {
|
||||
// apply network policy
|
||||
if let NetworkAccessPolicy::StaticArp { interface } = &self.network_policy.access {
|
||||
if let Some(r) = self.router.as_ref() {
|
||||
let vm = self.db.get_vm(assignment.vm_id).await?;
|
||||
let entry = ArpEntry::new(&vm, assignment, Some(interface.clone()))?;
|
||||
let arp = if let Some(_id) = &assignment.arp_ref {
|
||||
r.update_arp_entry(&entry).await?
|
||||
} else {
|
||||
r.add_arp_entry(&entry).await?
|
||||
};
|
||||
ensure!(arp.id.is_some(), "ARP id was empty");
|
||||
assignment.arp_ref = arp.id;
|
||||
pub async fn update_access_policy(
|
||||
&self,
|
||||
assignment: &mut VmIpAssignment,
|
||||
policy: &AccessPolicy,
|
||||
) -> Result<()> {
|
||||
let ip = IpNetwork::from_str(&assignment.ip)?;
|
||||
if matches!(policy.kind, NetworkAccessPolicy::StaticArp) && ip.is_ipv4() {
|
||||
let router = self
|
||||
.get_router(
|
||||
policy
|
||||
.router_id
|
||||
.context("Cannot apply static arp policy with no router")?,
|
||||
)
|
||||
.await?;
|
||||
let vm = self.db.get_vm(assignment.vm_id).await?;
|
||||
let entry = ArpEntry::new(&vm, assignment, policy.interface.clone())?;
|
||||
let arp = if let Some(_id) = &assignment.arp_ref {
|
||||
router.update_arp_entry(&entry).await?
|
||||
} else {
|
||||
bail!("No router found to apply static arp entry!")
|
||||
}
|
||||
router.add_arp_entry(&entry).await?
|
||||
};
|
||||
ensure!(arp.id.is_some(), "ARP id was empty");
|
||||
assignment.arp_ref = arp.id;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Remove an access policy for a given ip assignment, does not save to database!
|
||||
pub async fn remove_access_policy(&self, assignment: &mut VmIpAssignment) -> Result<()> {
|
||||
// Delete access policy
|
||||
if let NetworkAccessPolicy::StaticArp { .. } = &self.network_policy.access {
|
||||
if let Some(r) = self.router.as_ref() {
|
||||
let id = if let Some(id) = &assignment.arp_ref {
|
||||
Some(id.clone())
|
||||
pub async fn remove_access_policy(
|
||||
&self,
|
||||
assignment: &mut VmIpAssignment,
|
||||
policy: &AccessPolicy,
|
||||
) -> Result<()> {
|
||||
let ip = IpNetwork::from_str(&assignment.ip)?;
|
||||
if matches!(policy.kind, NetworkAccessPolicy::StaticArp) && ip.is_ipv4() {
|
||||
let router = self
|
||||
.get_router(
|
||||
policy
|
||||
.router_id
|
||||
.context("Cannot apply static arp policy with no router")?,
|
||||
)
|
||||
.await?;
|
||||
let id = if let Some(id) = &assignment.arp_ref {
|
||||
Some(id.clone())
|
||||
} else {
|
||||
warn!("ARP REF not found, using arp list");
|
||||
|
||||
let ent = router.list_arp_entry().await?;
|
||||
if let Some(ent) = ent.iter().find(|e| e.address == assignment.ip) {
|
||||
ent.id.clone()
|
||||
} else {
|
||||
warn!("ARP REF not found, using arp list");
|
||||
|
||||
let ent = r.list_arp_entry().await?;
|
||||
if let Some(ent) = ent.iter().find(|e| e.address == assignment.ip) {
|
||||
ent.id.clone()
|
||||
} else {
|
||||
warn!("ARP entry not found, skipping");
|
||||
None
|
||||
}
|
||||
};
|
||||
|
||||
if let Some(id) = id {
|
||||
if let Err(e) = r.remove_arp_entry(&id).await {
|
||||
warn!("Failed to remove arp entry, skipping: {}", e);
|
||||
}
|
||||
warn!("ARP entry not found, skipping");
|
||||
None
|
||||
}
|
||||
};
|
||||
|
||||
if let Some(id) = id {
|
||||
if let Err(e) = router.remove_arp_entry(&id).await {
|
||||
warn!("Failed to remove arp entry, skipping: {}", e);
|
||||
}
|
||||
assignment.arp_ref = None;
|
||||
}
|
||||
assignment.arp_ref = None;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
@ -107,17 +155,19 @@ impl LNVpsProvisioner {
|
||||
pub async fn remove_ip_dns(&self, assignment: &mut VmIpAssignment) -> Result<()> {
|
||||
// Delete forward/reverse dns
|
||||
if let Some(dns) = &self.dns {
|
||||
if let Some(_r) = &assignment.dns_reverse_ref {
|
||||
let range = self.db.get_ip_range(assignment.ip_range_id).await?;
|
||||
|
||||
if let (Some(z), Some(_ref)) = (&range.reverse_zone_id, &assignment.dns_reverse_ref) {
|
||||
let rev = BasicRecord::reverse(assignment)?;
|
||||
if let Err(e) = dns.delete_record(&rev).await {
|
||||
if let Err(e) = dns.delete_record(z, &rev).await {
|
||||
warn!("Failed to delete reverse record: {}", e);
|
||||
}
|
||||
assignment.dns_reverse_ref = None;
|
||||
assignment.dns_reverse = None;
|
||||
}
|
||||
if let Some(_r) = &assignment.dns_forward_ref {
|
||||
if let (Some(z), Some(_ref)) = (&self.forward_zone_id, &assignment.dns_forward_ref) {
|
||||
let rev = BasicRecord::forward(assignment)?;
|
||||
if let Err(e) = dns.delete_record(&rev).await {
|
||||
if let Err(e) = dns.delete_record(z, &rev).await {
|
||||
warn!("Failed to delete forward record: {}", e);
|
||||
}
|
||||
assignment.dns_forward_ref = None;
|
||||
@ -129,12 +179,12 @@ impl LNVpsProvisioner {
|
||||
|
||||
/// Update forward DNS on the DNS server, does not save to database!
|
||||
pub async fn update_forward_ip_dns(&self, assignment: &mut VmIpAssignment) -> Result<()> {
|
||||
if let Some(dns) = &self.dns {
|
||||
if let (Some(z), Some(dns)) = (&self.forward_zone_id, &self.dns) {
|
||||
let fwd = BasicRecord::forward(assignment)?;
|
||||
let ret_fwd = if fwd.id.is_some() {
|
||||
dns.update_record(&fwd).await?
|
||||
dns.update_record(z, &fwd).await?
|
||||
} else {
|
||||
dns.add_record(&fwd).await?
|
||||
dns.add_record(z, &fwd).await?
|
||||
};
|
||||
assignment.dns_forward = Some(ret_fwd.name);
|
||||
assignment.dns_forward_ref = Some(ret_fwd.id.context("Record id is missing")?);
|
||||
@ -145,15 +195,18 @@ impl LNVpsProvisioner {
|
||||
/// Update reverse DNS on the DNS server, does not save to database!
|
||||
pub async fn update_reverse_ip_dns(&self, assignment: &mut VmIpAssignment) -> Result<()> {
|
||||
if let Some(dns) = &self.dns {
|
||||
let ret_rev = if assignment.dns_reverse_ref.is_some() {
|
||||
dns.update_record(&BasicRecord::reverse(assignment)?)
|
||||
.await?
|
||||
} else {
|
||||
dns.add_record(&BasicRecord::reverse_to_fwd(assignment)?)
|
||||
.await?
|
||||
};
|
||||
assignment.dns_reverse = Some(ret_rev.value);
|
||||
assignment.dns_reverse_ref = Some(ret_rev.id.context("Record id is missing")?);
|
||||
let range = self.db.get_ip_range(assignment.ip_range_id).await?;
|
||||
if let Some(z) = &range.reverse_zone_id {
|
||||
let ret_rev = if assignment.dns_reverse_ref.is_some() {
|
||||
dns.update_record(z, &BasicRecord::reverse(assignment)?)
|
||||
.await?
|
||||
} else {
|
||||
dns.add_record(z, &BasicRecord::reverse_to_fwd(assignment)?)
|
||||
.await?
|
||||
};
|
||||
assignment.dns_reverse = Some(ret_rev.value);
|
||||
assignment.dns_reverse_ref = Some(ret_rev.id.context("Record id is missing")?);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
@ -162,8 +215,13 @@ impl LNVpsProvisioner {
|
||||
pub async fn delete_ip_assignments(&self, vm_id: u64) -> Result<()> {
|
||||
let ips = self.db.list_vm_ip_assignments(vm_id).await?;
|
||||
for mut ip in ips {
|
||||
// remove access policy
|
||||
self.remove_access_policy(&mut ip).await?;
|
||||
// load range info to check access policy
|
||||
let range = self.db.get_ip_range(ip.ip_range_id).await?;
|
||||
if let Some(ap) = range.access_policy_id {
|
||||
let ap = self.db.get_access_policy(ap).await?;
|
||||
// remove access policy
|
||||
self.remove_access_policy(&mut ip, &ap).await?;
|
||||
}
|
||||
// remove dns
|
||||
self.remove_ip_dns(&mut ip).await?;
|
||||
// save arp/dns changes
|
||||
@ -176,8 +234,13 @@ impl LNVpsProvisioner {
|
||||
}
|
||||
|
||||
async fn save_ip_assignment(&self, assignment: &mut VmIpAssignment) -> Result<()> {
|
||||
// apply access policy
|
||||
self.update_access_policy(assignment).await?;
|
||||
// load range info to check access policy
|
||||
let range = self.db.get_ip_range(assignment.ip_range_id).await?;
|
||||
if let Some(ap) = range.access_policy_id {
|
||||
let ap = self.db.get_access_policy(ap).await?;
|
||||
// apply access policy
|
||||
self.update_access_policy(assignment, &ap).await?;
|
||||
}
|
||||
|
||||
// Add DNS records
|
||||
self.update_forward_ip_dns(assignment).await?;
|
||||
@ -188,33 +251,115 @@ impl LNVpsProvisioner {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn get_mac_for_assignment(
|
||||
&self,
|
||||
host: &VmHost,
|
||||
vm: &Vm,
|
||||
assignment: &VmIpAssignment,
|
||||
) -> Result<ArpEntry> {
|
||||
let range = self.db.get_ip_range(assignment.ip_range_id).await?;
|
||||
|
||||
// ask router first if it wants to set the MAC
|
||||
if let Some(ap) = range.access_policy_id {
|
||||
let ap = self.db.get_access_policy(ap).await?;
|
||||
if let Some(rid) = ap.router_id {
|
||||
let router = self.get_router(rid).await?;
|
||||
|
||||
if let Some(mac) = router
|
||||
.generate_mac(&assignment.ip, &format!("VM{}", assignment.vm_id))
|
||||
.await?
|
||||
{
|
||||
return Ok(mac);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// otherwise, ask the host to generate the MAC
|
||||
let client = get_host_client(host, &self.provisioner_config)?;
|
||||
let mac = client.generate_mac(vm).await?;
|
||||
Ok(ArpEntry {
|
||||
id: None,
|
||||
address: assignment.ip.clone(),
|
||||
mac_address: mac,
|
||||
interface: None,
|
||||
comment: None,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn assign_available_v4_to_vm(
|
||||
&self,
|
||||
vm: &Vm,
|
||||
v4: &AvailableIp,
|
||||
) -> Result<VmIpAssignment> {
|
||||
let mut assignment = VmIpAssignment {
|
||||
vm_id: vm.id,
|
||||
ip_range_id: v4.range_id,
|
||||
ip: v4.ip.ip().to_string(),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
self.save_ip_assignment(&mut assignment).await?;
|
||||
Ok(assignment)
|
||||
}
|
||||
|
||||
pub async fn assign_available_v6_to_vm(
|
||||
&self,
|
||||
vm: &Vm,
|
||||
v6: &mut AvailableIp,
|
||||
) -> Result<VmIpAssignment> {
|
||||
match v6.mode {
|
||||
// it's a bit awkward, but we need to update the IP AFTER it's been picked
|
||||
// simply because sometimes we don't know the MAC of the NIC yet
|
||||
IpRangeAllocationMode::SlaacEui64 => {
|
||||
let mac = NetworkProvisioner::parse_mac(&vm.mac_address)?;
|
||||
let addr = NetworkProvisioner::calculate_eui64(&mac, &v6.ip)?;
|
||||
v6.ip = IpNetwork::new(addr, v6.ip.prefix())?;
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
let mut assignment = VmIpAssignment {
|
||||
vm_id: vm.id,
|
||||
ip_range_id: v6.range_id,
|
||||
ip: v6.ip.ip().to_string(),
|
||||
..Default::default()
|
||||
};
|
||||
self.save_ip_assignment(&mut assignment).await?;
|
||||
Ok(assignment)
|
||||
}
|
||||
|
||||
async fn allocate_ips(&self, vm_id: u64) -> Result<Vec<VmIpAssignment>> {
|
||||
let vm = self.db.get_vm(vm_id).await?;
|
||||
let mut vm = self.db.get_vm(vm_id).await?;
|
||||
let existing_ips = self.db.list_vm_ip_assignments(vm_id).await?;
|
||||
if !existing_ips.is_empty() {
|
||||
return Ok(existing_ips);
|
||||
}
|
||||
|
||||
// Use random network provisioner
|
||||
let network = NetworkProvisioner::new(ProvisionerMethod::Random, self.db.clone());
|
||||
let network = NetworkProvisioner::new(self.db.clone());
|
||||
|
||||
let host = self.db.get_host(vm.host_id).await?;
|
||||
let ip = network.pick_ip_for_region(host.region_id).await?;
|
||||
let mut assignment = VmIpAssignment {
|
||||
id: 0,
|
||||
vm_id,
|
||||
ip_range_id: ip.range_id,
|
||||
ip: ip.ip.to_string(),
|
||||
deleted: false,
|
||||
arp_ref: None,
|
||||
dns_forward: None,
|
||||
dns_forward_ref: None,
|
||||
dns_reverse: None,
|
||||
dns_reverse_ref: None,
|
||||
};
|
||||
let mut assignments = vec![];
|
||||
match ip.ip4 {
|
||||
Some(v4) => {
|
||||
let mut assignment = self.assign_available_v4_to_vm(&vm, &v4).await?;
|
||||
|
||||
self.save_ip_assignment(&mut assignment).await?;
|
||||
Ok(vec![assignment])
|
||||
// generate MAC address from the IP assignment
|
||||
let mac = self.get_mac_for_assignment(&host, &vm, &assignment).await?;
|
||||
vm.mac_address = mac.mac_address;
|
||||
assignment.arp_ref = mac.id; // store ref if we got one
|
||||
self.db.update_vm(&vm).await?;
|
||||
|
||||
assignments.push(assignment);
|
||||
}
|
||||
// TODO: add expected number of IPs per template
|
||||
None => bail!("Cannot provision VM without an IPv4 address"),
|
||||
}
|
||||
if let Some(mut v6) = ip.ip6 {
|
||||
assignments.push(self.assign_available_v6_to_vm(&vm, &mut v6).await?);
|
||||
}
|
||||
|
||||
Ok(assignments)
|
||||
}
|
||||
|
||||
/// Do any necessary initialization
|
||||
@ -235,6 +380,11 @@ impl LNVpsProvisioner {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get database handle
|
||||
pub fn get_db(&self) -> Arc<dyn LNVpsDb> {
|
||||
self.db.clone()
|
||||
}
|
||||
|
||||
/// Provision a new VM for a user on the database
|
||||
///
|
||||
/// Note:
|
||||
@ -255,7 +405,9 @@ impl LNVpsProvisioner {
|
||||
|
||||
// TODO: cache capacity somewhere
|
||||
let cap = HostCapacityService::new(self.db.clone());
|
||||
let host = cap.get_host_for_template(template.region_id, &template).await?;
|
||||
let host = cap
|
||||
.get_host_for_template(template.region_id, &template)
|
||||
.await?;
|
||||
|
||||
let pick_disk = if let Some(hd) = host.disks.first() {
|
||||
hd
|
||||
@ -263,7 +415,6 @@ impl LNVpsProvisioner {
|
||||
bail!("No host disk found")
|
||||
};
|
||||
|
||||
let client = get_host_client(&host.host, &self.provisioner_config)?;
|
||||
let mut new_vm = Vm {
|
||||
id: 0,
|
||||
host_id: host.host.id,
|
||||
@ -275,14 +426,11 @@ impl LNVpsProvisioner {
|
||||
created: Utc::now(),
|
||||
expires: Utc::now(),
|
||||
disk_id: pick_disk.disk.id,
|
||||
mac_address: "NOT FILLED YET".to_string(),
|
||||
mac_address: "ff:ff:ff:ff:ff:ff".to_string(),
|
||||
deleted: false,
|
||||
ref_code,
|
||||
};
|
||||
|
||||
// ask host client to generate the mac address
|
||||
new_vm.mac_address = client.generate_mac(&new_vm).await?;
|
||||
|
||||
let new_id = self.db.insert_vm(&new_vm).await?;
|
||||
new_vm.id = new_id;
|
||||
Ok(new_vm)
|
||||
@ -308,7 +456,9 @@ impl LNVpsProvisioner {
|
||||
|
||||
// TODO: cache capacity somewhere
|
||||
let cap = HostCapacityService::new(self.db.clone());
|
||||
let host = cap.get_host_for_template(pricing.region_id, &template).await?;
|
||||
let host = cap
|
||||
.get_host_for_template(pricing.region_id, &template)
|
||||
.await?;
|
||||
|
||||
let pick_disk = if let Some(hd) = host.disks.first() {
|
||||
hd
|
||||
@ -319,7 +469,6 @@ impl LNVpsProvisioner {
|
||||
// insert custom templates
|
||||
let template_id = self.db.insert_custom_vm_template(&template).await?;
|
||||
|
||||
let client = get_host_client(&host.host, &self.provisioner_config)?;
|
||||
let mut new_vm = Vm {
|
||||
id: 0,
|
||||
host_id: host.host.id,
|
||||
@ -331,54 +480,101 @@ impl LNVpsProvisioner {
|
||||
created: Utc::now(),
|
||||
expires: Utc::now(),
|
||||
disk_id: pick_disk.disk.id,
|
||||
mac_address: "NOT FILLED YET".to_string(),
|
||||
mac_address: "ff:ff:ff:ff:ff:ff".to_string(),
|
||||
deleted: false,
|
||||
ref_code,
|
||||
};
|
||||
|
||||
// ask host client to generate the mac address
|
||||
new_vm.mac_address = client.generate_mac(&new_vm).await?;
|
||||
|
||||
let new_id = self.db.insert_vm(&new_vm).await?;
|
||||
new_vm.id = new_id;
|
||||
Ok(new_vm)
|
||||
}
|
||||
|
||||
/// Create a renewal payment
|
||||
pub async fn renew(&self, vm_id: u64) -> Result<VmPayment> {
|
||||
let pe = PricingEngine::new(self.db.clone(), self.rates.clone());
|
||||
pub async fn renew(&self, vm_id: u64, method: PaymentMethod) -> Result<VmPayment> {
|
||||
let pe = PricingEngine::new(self.db.clone(), self.rates.clone(), self.tax_rates.clone());
|
||||
|
||||
let price = pe.get_vm_cost(vm_id).await?;
|
||||
let price = pe.get_vm_cost(vm_id, method).await?;
|
||||
match price {
|
||||
CostResult::Existing(p) => Ok(p),
|
||||
CostResult::New {
|
||||
msats,
|
||||
amount,
|
||||
currency,
|
||||
time_value,
|
||||
new_expiry,
|
||||
rate,
|
||||
tax,
|
||||
} => {
|
||||
const INVOICE_EXPIRE: u64 = 600;
|
||||
info!("Creating invoice for {vm_id} for {} sats", msats / 1000);
|
||||
let invoice = self
|
||||
.node
|
||||
.add_invoice(AddInvoiceRequest {
|
||||
memo: Some(format!("VM renewal {vm_id} to {new_expiry}")),
|
||||
amount: msats,
|
||||
expire: Some(INVOICE_EXPIRE as u32),
|
||||
})
|
||||
.await?;
|
||||
let vm_payment = VmPayment {
|
||||
id: hex::decode(invoice.payment_hash)?,
|
||||
vm_id,
|
||||
created: Utc::now(),
|
||||
expires: Utc::now().add(Duration::from_secs(INVOICE_EXPIRE)),
|
||||
amount: msats,
|
||||
invoice: invoice.pr,
|
||||
time_value,
|
||||
is_paid: false,
|
||||
rate,
|
||||
settle_index: None,
|
||||
let desc = format!("VM renewal {vm_id} to {new_expiry}");
|
||||
let vm_payment = match method {
|
||||
PaymentMethod::Lightning => {
|
||||
ensure!(
|
||||
currency == Currency::BTC,
|
||||
"Cannot create invoices for non-BTC currency"
|
||||
);
|
||||
const INVOICE_EXPIRE: u64 = 600;
|
||||
let total_amount = amount + tax;
|
||||
info!(
|
||||
"Creating invoice for {vm_id} for {} sats",
|
||||
total_amount / 1000
|
||||
);
|
||||
let invoice = self
|
||||
.node
|
||||
.add_invoice(AddInvoiceRequest {
|
||||
memo: Some(desc),
|
||||
amount: total_amount,
|
||||
expire: Some(INVOICE_EXPIRE as u32),
|
||||
})
|
||||
.await?;
|
||||
VmPayment {
|
||||
id: hex::decode(invoice.payment_hash)?,
|
||||
vm_id,
|
||||
created: Utc::now(),
|
||||
expires: Utc::now().add(Duration::from_secs(INVOICE_EXPIRE)),
|
||||
amount,
|
||||
tax,
|
||||
currency: currency.to_string(),
|
||||
payment_method: method,
|
||||
time_value,
|
||||
is_paid: false,
|
||||
rate,
|
||||
external_data: invoice.pr,
|
||||
external_id: invoice.external_id,
|
||||
}
|
||||
}
|
||||
PaymentMethod::Revolut => {
|
||||
let rev = if let Some(r) = &self.revolut {
|
||||
r
|
||||
} else {
|
||||
bail!("Revolut not configured")
|
||||
};
|
||||
ensure!(
|
||||
currency != Currency::BTC,
|
||||
"Cannot create revolut orders for BTC currency"
|
||||
);
|
||||
let order = rev
|
||||
.create_order(&desc, CurrencyAmount::from_u64(currency, amount + tax))
|
||||
.await?;
|
||||
let new_id: [u8; 32] = rand::random();
|
||||
VmPayment {
|
||||
id: new_id.to_vec(),
|
||||
vm_id,
|
||||
created: Utc::now(),
|
||||
expires: Utc::now().add(Duration::from_secs(3600)),
|
||||
amount,
|
||||
tax,
|
||||
currency: currency.to_string(),
|
||||
payment_method: method,
|
||||
time_value,
|
||||
is_paid: false,
|
||||
rate,
|
||||
external_data: order.raw_data,
|
||||
external_id: Some(order.external_id),
|
||||
}
|
||||
}
|
||||
PaymentMethod::Paypal => todo!(),
|
||||
};
|
||||
|
||||
self.db.insert_vm_payment(&vm_payment).await?;
|
||||
|
||||
Ok(vm_payment)
|
||||
@ -430,57 +626,18 @@ impl LNVpsProvisioner {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::exchange::DefaultRateCache;
|
||||
use crate::mocks::{MockDb, MockDnsServer, MockNode, MockRouter};
|
||||
use crate::settings::{DnsServerConfig, LightningConfig, QemuConfig, RouterConfig};
|
||||
use crate::exchange::{DefaultRateCache, Ticker};
|
||||
use crate::mocks::{MockDb, MockDnsServer, MockExchangeRate, MockNode, MockRouter};
|
||||
use crate::settings::mock_settings;
|
||||
use lnvps_db::{DiskInterface, DiskType, User, UserSshKey, VmTemplate};
|
||||
use std::net::IpAddr;
|
||||
use std::str::FromStr;
|
||||
|
||||
const ROUTER_BRIDGE: &str = "bridge1";
|
||||
|
||||
fn settings() -> Settings {
|
||||
Settings {
|
||||
listen: None,
|
||||
db: "".to_string(),
|
||||
lightning: LightningConfig::LND {
|
||||
url: "".to_string(),
|
||||
cert: Default::default(),
|
||||
macaroon: Default::default(),
|
||||
},
|
||||
read_only: false,
|
||||
provisioner: ProvisionerConfig::Proxmox {
|
||||
qemu: QemuConfig {
|
||||
machine: "q35".to_string(),
|
||||
os_type: "l26".to_string(),
|
||||
bridge: "vmbr1".to_string(),
|
||||
cpu: "kvm64".to_string(),
|
||||
vlan: None,
|
||||
kvm: false,
|
||||
},
|
||||
ssh: None,
|
||||
mac_prefix: Some("ff:ff:ff".to_string()),
|
||||
},
|
||||
network_policy: NetworkPolicy {
|
||||
access: NetworkAccessPolicy::StaticArp {
|
||||
interface: ROUTER_BRIDGE.to_string(),
|
||||
},
|
||||
ip6_slaac: None,
|
||||
},
|
||||
delete_after: 0,
|
||||
smtp: None,
|
||||
router: Some(RouterConfig::Mikrotik {
|
||||
url: "https://localhost".to_string(),
|
||||
username: "admin".to_string(),
|
||||
password: "password123".to_string(),
|
||||
}),
|
||||
dns: Some(DnsServerConfig::Cloudflare {
|
||||
token: "abc".to_string(),
|
||||
forward_zone_id: "123".to_string(),
|
||||
reverse_zone_id: "456".to_string(),
|
||||
}),
|
||||
nostr: None,
|
||||
}
|
||||
pub fn settings() -> Settings {
|
||||
let mut settings = mock_settings();
|
||||
settings
|
||||
}
|
||||
|
||||
async fn add_user(db: &Arc<MockDb>) -> Result<(User, UserSshKey)> {
|
||||
@ -504,8 +661,43 @@ mod tests {
|
||||
let settings = settings();
|
||||
let db = Arc::new(MockDb::default());
|
||||
let node = Arc::new(MockNode::default());
|
||||
let rates = Arc::new(DefaultRateCache::default());
|
||||
let router = MockRouter::new(settings.network_policy.clone());
|
||||
let rates = Arc::new(MockExchangeRate::new());
|
||||
const MOCK_RATE: f32 = 69_420.0;
|
||||
rates.set_rate(Ticker::btc_rate("EUR")?, MOCK_RATE).await;
|
||||
|
||||
// add static arp policy
|
||||
{
|
||||
let mut r = db.router.lock().await;
|
||||
r.insert(
|
||||
1,
|
||||
lnvps_db::Router {
|
||||
id: 1,
|
||||
name: "mock-router".to_string(),
|
||||
enabled: true,
|
||||
kind: RouterKind::Mikrotik,
|
||||
url: "https://localhost".to_string(),
|
||||
token: "username:password".to_string(),
|
||||
},
|
||||
);
|
||||
let mut p = db.access_policy.lock().await;
|
||||
p.insert(
|
||||
1,
|
||||
AccessPolicy {
|
||||
id: 1,
|
||||
name: "static-arp".to_string(),
|
||||
kind: NetworkAccessPolicy::StaticArp,
|
||||
router_id: Some(1),
|
||||
interface: Some(ROUTER_BRIDGE.to_string()),
|
||||
},
|
||||
);
|
||||
let mut i = db.ip_range.lock().await;
|
||||
let r = i.get_mut(&1).unwrap();
|
||||
r.access_policy_id = Some(1);
|
||||
r.reverse_zone_id = Some("mock-rev-zone-id".to_string());
|
||||
let r = i.get_mut(&2).unwrap();
|
||||
r.reverse_zone_id = Some("mock-v6-rev-zone-id".to_string());
|
||||
}
|
||||
|
||||
let dns = MockDnsServer::new();
|
||||
let provisioner = LNVpsProvisioner::new(settings, db.clone(), node.clone(), rates.clone());
|
||||
|
||||
@ -514,9 +706,25 @@ mod tests {
|
||||
.provision(user.id, 1, 1, ssh_key.id, Some("mock-ref".to_string()))
|
||||
.await?;
|
||||
println!("{:?}", vm);
|
||||
|
||||
// renew vm
|
||||
let payment = provisioner.renew(vm.id, PaymentMethod::Lightning).await?;
|
||||
assert_eq!(vm.id, payment.vm_id);
|
||||
assert_eq!(payment.tax, (payment.amount as f64 * 0.01).floor() as u64);
|
||||
|
||||
// check invoice amount matches amount+tax
|
||||
let inv = node.invoices.lock().await;
|
||||
if let Some(i) = inv.get(&hex::encode(payment.id)) {
|
||||
assert_eq!(i.amount, payment.amount + payment.tax);
|
||||
} else {
|
||||
bail!("Invoice doesnt exist");
|
||||
}
|
||||
|
||||
// spawn vm
|
||||
provisioner.spawn_vm(vm.id).await?;
|
||||
|
||||
// check resources
|
||||
let router = MockRouter::new();
|
||||
let arp = router.list_arp_entry().await?;
|
||||
assert_eq!(1, arp.len());
|
||||
let arp = arp.first().unwrap();
|
||||
@ -526,22 +734,47 @@ mod tests {
|
||||
println!("{:?}", arp);
|
||||
|
||||
let ips = db.list_vm_ip_assignments(vm.id).await?;
|
||||
assert_eq!(1, ips.len());
|
||||
let ip = ips.first().unwrap();
|
||||
println!("{:?}", ip);
|
||||
assert_eq!(ip.ip, arp.address);
|
||||
assert_eq!(ip.ip_range_id, 1);
|
||||
assert_eq!(ip.vm_id, vm.id);
|
||||
assert!(ip.dns_forward.is_some());
|
||||
assert!(ip.dns_reverse.is_some());
|
||||
assert!(ip.dns_reverse_ref.is_some());
|
||||
assert!(ip.dns_forward_ref.is_some());
|
||||
assert_eq!(ip.dns_reverse, ip.dns_forward);
|
||||
assert_eq!(2, ips.len());
|
||||
|
||||
// lookup v4 ip
|
||||
let v4 = ips.iter().find(|r| r.ip_range_id == 1).unwrap();
|
||||
println!("{:?}", v4);
|
||||
assert_eq!(v4.ip, arp.address);
|
||||
assert_eq!(v4.ip_range_id, 1);
|
||||
assert_eq!(v4.vm_id, vm.id);
|
||||
assert!(v4.dns_forward.is_some());
|
||||
assert!(v4.dns_reverse.is_some());
|
||||
assert!(v4.dns_reverse_ref.is_some());
|
||||
assert!(v4.dns_forward_ref.is_some());
|
||||
assert_eq!(v4.dns_reverse, v4.dns_forward);
|
||||
|
||||
// assert IP address is not CIDR
|
||||
assert!(IpAddr::from_str(&ip.ip).is_ok());
|
||||
assert!(!ip.ip.ends_with("/8"));
|
||||
assert!(!ip.ip.ends_with("/24"));
|
||||
assert!(IpAddr::from_str(&v4.ip).is_ok());
|
||||
assert!(!v4.ip.ends_with("/8"));
|
||||
assert!(!v4.ip.ends_with("/24"));
|
||||
|
||||
// lookup v6 ip
|
||||
let v6 = ips.iter().find(|r| r.ip_range_id == 2).unwrap();
|
||||
println!("{:?}", v6);
|
||||
assert_eq!(v6.ip_range_id, 2);
|
||||
assert_eq!(v6.vm_id, vm.id);
|
||||
assert!(v6.dns_forward.is_some());
|
||||
assert!(v6.dns_reverse.is_some());
|
||||
assert!(v6.dns_reverse_ref.is_some());
|
||||
assert!(v6.dns_forward_ref.is_some());
|
||||
assert_eq!(v6.dns_reverse, v6.dns_forward);
|
||||
|
||||
// test zones have dns entries
|
||||
{
|
||||
let zones = dns.zones.lock().await;
|
||||
assert_eq!(zones.get("mock-rev-zone-id").unwrap().len(), 1);
|
||||
assert_eq!(zones.get("mock-v6-rev-zone-id").unwrap().len(), 1);
|
||||
assert_eq!(zones.get("mock-forward-zone-id").unwrap().len(), 2);
|
||||
|
||||
let v6 = zones.get("mock-v6-rev-zone-id").unwrap().iter().next().unwrap();
|
||||
assert_eq!(v6.1.kind, "PTR");
|
||||
assert!(v6.1.name.ends_with("0.0.d.f.ip6.arpa"));
|
||||
}
|
||||
|
||||
// now expire
|
||||
provisioner.delete_vm(vm.id).await?;
|
||||
@ -549,19 +782,25 @@ mod tests {
|
||||
// test arp/dns is removed
|
||||
let arp = router.list_arp_entry().await?;
|
||||
assert!(arp.is_empty());
|
||||
assert_eq!(dns.forward.lock().await.len(), 0);
|
||||
assert_eq!(dns.reverse.lock().await.len(), 0);
|
||||
|
||||
// test dns entries are deleted
|
||||
{
|
||||
let zones = dns.zones.lock().await;
|
||||
assert_eq!(zones.get("mock-rev-zone-id").unwrap().len(), 0);
|
||||
assert_eq!(zones.get("mock-forward-zone-id").unwrap().len(), 0);
|
||||
}
|
||||
|
||||
// ensure IPs are deleted
|
||||
let ips = db.ip_assignments.lock().await;
|
||||
let ip = ips.values().next().unwrap();
|
||||
assert!(ip.arp_ref.is_none());
|
||||
assert!(ip.dns_forward.is_none());
|
||||
assert!(ip.dns_reverse.is_none());
|
||||
assert!(ip.dns_reverse_ref.is_none());
|
||||
assert!(ip.dns_forward_ref.is_none());
|
||||
assert!(ip.deleted);
|
||||
println!("{:?}", ip);
|
||||
let ips = db.list_vm_ip_assignments(vm.id).await?;
|
||||
for ip in ips {
|
||||
println!("{:?}", ip);
|
||||
assert!(ip.arp_ref.is_none());
|
||||
assert!(ip.dns_forward.is_none());
|
||||
assert!(ip.dns_reverse.is_none());
|
||||
assert!(ip.dns_reverse_ref.is_none());
|
||||
assert!(ip.dns_forward_ref.is_none());
|
||||
assert!(ip.deleted);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@ -581,8 +820,8 @@ mod tests {
|
||||
created: Default::default(),
|
||||
expires: None,
|
||||
cpu: 64,
|
||||
memory: 512 * MockDb::GB,
|
||||
disk_size: 20 * MockDb::TB,
|
||||
memory: 512 * crate::GB,
|
||||
disk_size: 20 * crate::TB,
|
||||
disk_type: DiskType::SSD,
|
||||
disk_interface: DiskInterface::PCIe,
|
||||
cost_plan_id: 1,
|
||||
|
@ -59,4 +59,4 @@ impl Template for VmCustomTemplate {
|
||||
fn disk_interface(&self) -> DiskInterface {
|
||||
self.disk_interface
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,89 +1,179 @@
|
||||
use anyhow::{bail, Result};
|
||||
use ipnetwork::IpNetwork;
|
||||
use lnvps_db::LNVpsDb;
|
||||
use anyhow::{bail, Context, Result};
|
||||
use clap::builder::TypedValueParser;
|
||||
use ipnetwork::{IpNetwork, Ipv6Network};
|
||||
use lnvps_db::{IpRange, IpRangeAllocationMode, LNVpsDb};
|
||||
use log::warn;
|
||||
use rand::prelude::IteratorRandom;
|
||||
use rocket::form::validate::Contains;
|
||||
use rocket::http::ext::IntoCollection;
|
||||
use std::collections::HashSet;
|
||||
use std::net::IpAddr;
|
||||
use std::net::{IpAddr, Ipv6Addr};
|
||||
use std::sync::Arc;
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub enum ProvisionerMethod {
|
||||
Sequential,
|
||||
Random,
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct AvailableIps {
|
||||
pub ip4: Option<AvailableIp>,
|
||||
pub ip6: Option<AvailableIp>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct AvailableIp {
|
||||
pub ip: IpAddr,
|
||||
pub ip: IpNetwork,
|
||||
pub gateway: IpNetwork,
|
||||
pub range_id: u64,
|
||||
pub region_id: u64,
|
||||
pub mode: IpRangeAllocationMode,
|
||||
}
|
||||
|
||||
/// Handles picking available IPs
|
||||
#[derive(Clone)]
|
||||
pub struct NetworkProvisioner {
|
||||
method: ProvisionerMethod,
|
||||
db: Arc<dyn LNVpsDb>,
|
||||
}
|
||||
|
||||
impl NetworkProvisioner {
|
||||
pub fn new(method: ProvisionerMethod, db: Arc<dyn LNVpsDb>) -> Self {
|
||||
Self { method, db }
|
||||
pub fn new(db: Arc<dyn LNVpsDb>) -> Self {
|
||||
Self { db }
|
||||
}
|
||||
|
||||
/// Pick an IP from one of the available ip ranges
|
||||
/// This method MUST return a free IP which can be used
|
||||
pub async fn pick_ip_for_region(&self, region_id: u64) -> Result<AvailableIp> {
|
||||
pub async fn pick_ip_for_region(&self, region_id: u64) -> Result<AvailableIps> {
|
||||
let ip_ranges = self.db.list_ip_range_in_region(region_id).await?;
|
||||
if ip_ranges.is_empty() {
|
||||
bail!("No ip range found in this region");
|
||||
}
|
||||
|
||||
let mut ret = AvailableIps {
|
||||
ip4: None,
|
||||
ip6: None,
|
||||
};
|
||||
for range in ip_ranges {
|
||||
let range_cidr: IpNetwork = range.cidr.parse()?;
|
||||
let ips = self.db.list_vm_ip_assignments_in_range(range.id).await?;
|
||||
let mut ips: HashSet<IpAddr> = ips.iter().map_while(|i| i.ip.parse().ok()).collect();
|
||||
if ret.ip4.is_none() && range_cidr.is_ipv4() {
|
||||
ret.ip4 = match self.pick_ip_from_range(&range).await {
|
||||
Ok(i) => Some(i),
|
||||
Err(e) => {
|
||||
warn!("Failed to pick ip range: {} {}", range.cidr, e);
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
if ret.ip6.is_none() && range_cidr.is_ipv6() {
|
||||
ret.ip6 = match self.pick_ip_from_range(&range).await {
|
||||
Ok(i) => Some(i),
|
||||
Err(e) => {
|
||||
warn!("Failed to pick ip range: {} {}", range.cidr, e);
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if ret.ip4.is_none() && ret.ip6.is_none() {
|
||||
bail!("No IPs available in this region");
|
||||
} else {
|
||||
Ok(ret)
|
||||
}
|
||||
}
|
||||
|
||||
let gateway: IpNetwork = range.gateway.parse()?;
|
||||
pub async fn pick_ip_from_range(&self, range: &IpRange) -> Result<AvailableIp> {
|
||||
let range_cidr: IpNetwork = range.cidr.parse()?;
|
||||
let ips = self.db.list_vm_ip_assignments_in_range(range.id).await?;
|
||||
let mut ips: HashSet<IpAddr> = ips.iter().map_while(|i| i.ip.parse().ok()).collect();
|
||||
|
||||
// mark some IPs as always used
|
||||
// Namely:
|
||||
// .0 & .255 of /24 (first and last)
|
||||
// gateway ip of the range
|
||||
let gateway: IpNetwork = range.gateway.parse()?;
|
||||
|
||||
// mark some IPs as always used
|
||||
// Namely:
|
||||
// .0 & .255 of /24 (first and last)
|
||||
// gateway ip of the range
|
||||
if !range.use_full_range && range_cidr.is_ipv4() {
|
||||
ips.insert(range_cidr.iter().next().unwrap());
|
||||
ips.insert(range_cidr.iter().last().unwrap());
|
||||
ips.insert(gateway.ip());
|
||||
}
|
||||
ips.insert(gateway.ip());
|
||||
|
||||
// pick an IP at random
|
||||
let ip_pick = {
|
||||
match self.method {
|
||||
ProvisionerMethod::Sequential => range_cidr.iter().find(|i| !ips.contains(i)),
|
||||
ProvisionerMethod::Random => {
|
||||
let mut rng = rand::rng();
|
||||
loop {
|
||||
if let Some(i) = range_cidr.iter().choose(&mut rng) {
|
||||
if !ips.contains(&i) {
|
||||
break Some(i);
|
||||
}
|
||||
} else {
|
||||
break None;
|
||||
// pick an IP from the range
|
||||
let ip_pick = {
|
||||
match &range.allocation_mode {
|
||||
IpRangeAllocationMode::Sequential => range_cidr
|
||||
.iter()
|
||||
.find(|i| !ips.contains(i))
|
||||
.and_then(|i| IpNetwork::new(i, range_cidr.prefix()).ok()),
|
||||
IpRangeAllocationMode::Random => {
|
||||
let mut rng = rand::rng();
|
||||
loop {
|
||||
if let Some(i) = range_cidr.iter().choose(&mut rng) {
|
||||
if !ips.contains(&i) {
|
||||
break IpNetwork::new(i, range_cidr.prefix()).ok();
|
||||
}
|
||||
} else {
|
||||
break None;
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
if let Some(ip_pick) = ip_pick {
|
||||
return Ok(AvailableIp {
|
||||
range_id: range.id,
|
||||
gateway,
|
||||
ip: ip_pick,
|
||||
region_id,
|
||||
});
|
||||
IpRangeAllocationMode::SlaacEui64 => {
|
||||
if range_cidr.network().is_ipv4() {
|
||||
bail!("Cannot create EUI-64 from IPv4 address")
|
||||
} else {
|
||||
// effectively always free IPs here
|
||||
Some(range_cidr)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
bail!("No IPs available in this region");
|
||||
.context("No ips available in range")?;
|
||||
|
||||
Ok(AvailableIp {
|
||||
range_id: range.id,
|
||||
gateway,
|
||||
ip: ip_pick,
|
||||
region_id: range.region_id,
|
||||
mode: range.allocation_mode.clone(),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn calculate_eui64(mac: &[u8; 6], prefix: &IpNetwork) -> Result<IpAddr> {
|
||||
if prefix.is_ipv4() {
|
||||
bail!("Prefix must be IPv6".to_string())
|
||||
}
|
||||
|
||||
let mut eui64 = [0u8; 8];
|
||||
eui64[0] = mac[0] ^ 0x02;
|
||||
eui64[1] = mac[1];
|
||||
eui64[2] = mac[2];
|
||||
eui64[3] = 0xFF;
|
||||
eui64[4] = 0xFE;
|
||||
eui64[5] = mac[3];
|
||||
eui64[6] = mac[4];
|
||||
eui64[7] = mac[5];
|
||||
|
||||
// Combine prefix with EUI-64 interface identifier
|
||||
let mut prefix_bytes = match prefix.network() {
|
||||
IpAddr::V4(_) => bail!("Not supported"),
|
||||
IpAddr::V6(v6) => v6.octets(),
|
||||
};
|
||||
// copy EUI-64 into prefix
|
||||
prefix_bytes[8..16].copy_from_slice(&eui64);
|
||||
|
||||
let ipv6_addr = Ipv6Addr::from(prefix_bytes);
|
||||
Ok(IpAddr::V6(ipv6_addr))
|
||||
}
|
||||
|
||||
pub fn parse_mac(mac: &str) -> Result<[u8; 6]> {
|
||||
Ok(hex::decode(mac.replace(":", ""))?.as_slice().try_into()?)
|
||||
}
|
||||
|
||||
pub fn ipv6_to_ptr(addr: &Ipv6Addr) -> Result<String> {
|
||||
let octets = addr.octets();
|
||||
let mut nibbles = Vec::new();
|
||||
for byte in octets.iter().rev() {
|
||||
let high_nibble = (byte >> 4) & 0x0Fu8;
|
||||
let low_nibble = byte & 0x0F;
|
||||
nibbles.push(format!("{:x}", low_nibble));
|
||||
nibbles.push(format!("{:x}", high_nibble));
|
||||
}
|
||||
Ok(format!("{}.ip6.arpa", nibbles.join(".")))
|
||||
}
|
||||
}
|
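// A minimal sketch (not part of the diff) of calculate_eui64 with the same MAC the
// tests below use; the fd00::/64 prefix is an assumed example value. Flipping the
// universal/local bit turns 0xff into 0xfd and ff:fe is spliced into the middle of
// the MAC, so ff:ff:ff:fa:fb:fc under fd00::/64 becomes fd00::fdff:ffff:fefa:fbfc.
#[test]
fn eui64_worked_example() -> anyhow::Result<()> {
    use ipnetwork::IpNetwork;
    use std::net::IpAddr;
    let mac: [u8; 6] = [0xff, 0xff, 0xff, 0xfa, 0xfb, 0xfc];
    let prefix: IpNetwork = "fd00::/64".parse()?;
    let addr = NetworkProvisioner::calculate_eui64(&mac, &prefix)?;
    assert_eq!(addr, "fd00::fdff:ffff:fefa:fbfc".parse::<IpAddr>()?);
    Ok(())
}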
||||
|
||||
@ -98,38 +188,44 @@ mod tests {
|
||||
#[tokio::test]
|
||||
async fn pick_seq_ip_for_region_test() {
|
||||
let db: Arc<dyn LNVpsDb> = Arc::new(MockDb::default());
|
||||
let mgr = NetworkProvisioner::new(ProvisionerMethod::Sequential, db.clone());
|
||||
let mgr = NetworkProvisioner::new(db.clone());
|
||||
|
||||
let mac: [u8; 6] = [0xff, 0xff, 0xff, 0xfa, 0xfb, 0xfc];
|
||||
let gateway = IpNetwork::from_str("10.0.0.1/8").unwrap();
|
||||
let first = IpAddr::from_str("10.0.0.2").unwrap();
|
||||
let second = IpAddr::from_str("10.0.0.3").unwrap();
|
||||
let ip = mgr.pick_ip_for_region(1).await.expect("No ip found in db");
|
||||
assert_eq!(1, ip.region_id);
|
||||
assert_eq!(first, ip.ip);
|
||||
assert_eq!(gateway, ip.gateway);
|
||||
let v4 = ip.ip4.unwrap();
|
||||
assert_eq!(v4.region_id, 1);
|
||||
assert_eq!(first, v4.ip.ip());
|
||||
assert_eq!(gateway, v4.gateway);
|
||||
|
||||
let ip = mgr.pick_ip_for_region(1).await.expect("No ip found in db");
|
||||
assert_eq!(1, ip.region_id);
|
||||
assert_eq!(first, ip.ip);
|
||||
let v4 = ip.ip4.unwrap();
|
||||
assert_eq!(1, v4.region_id);
|
||||
assert_eq!(first, v4.ip.ip());
|
||||
db.insert_vm_ip_assignment(&VmIpAssignment {
|
||||
id: 0,
|
||||
vm_id: 0,
|
||||
ip_range_id: ip.range_id,
|
||||
ip: ip.ip.to_string(),
|
||||
ip_range_id: v4.range_id,
|
||||
ip: v4.ip.ip().to_string(),
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
.expect("Could not insert vm ip");
|
||||
let ip = mgr.pick_ip_for_region(1).await.expect("No ip found in db");
|
||||
assert_eq!(second, ip.ip);
|
||||
let v4 = ip.ip4.unwrap();
|
||||
assert_eq!(second, v4.ip.ip());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn pick_rng_ip_for_region_test() {
|
||||
let db: Arc<dyn LNVpsDb> = Arc::new(MockDb::default());
|
||||
let mgr = NetworkProvisioner::new(ProvisionerMethod::Random, db);
|
||||
let mgr = NetworkProvisioner::new(db);
|
||||
|
||||
let mac: [u8; 6] = [0xff, 0xff, 0xff, 0xfa, 0xfb, 0xfc];
|
||||
let ip = mgr.pick_ip_for_region(1).await.expect("No ip found in db");
|
||||
assert_eq!(1, ip.region_id);
|
||||
let v4 = ip.ip4.unwrap();
|
||||
assert_eq!(1, v4.region_id);
|
||||
}
|
||||
}
|
||||
|
@ -1,9 +1,13 @@
|
||||
use crate::exchange::{Currency, ExchangeRateService, Ticker};
|
||||
use anyhow::{bail, Context, Result};
|
||||
use crate::exchange::{Currency, CurrencyAmount, ExchangeRateService, Ticker, TickerRate};
|
||||
use anyhow::{bail, Result};
|
||||
use chrono::{DateTime, Days, Months, TimeDelta, Utc};
|
||||
use ipnetwork::IpNetwork;
|
||||
use lnvps_db::{LNVpsDb, Vm, VmCostPlan, VmCostPlanIntervalType, VmCustomTemplate, VmPayment};
|
||||
use isocountry::CountryCode;
|
||||
use lnvps_db::{
|
||||
LNVpsDb, PaymentMethod, Vm, VmCostPlan, VmCostPlanIntervalType, VmCustomTemplate, VmPayment,
|
||||
};
|
||||
use log::info;
|
||||
use std::collections::HashMap;
|
||||
use std::ops::Add;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
@ -14,36 +18,39 @@ use std::sync::Arc;
|
||||
pub struct PricingEngine {
|
||||
db: Arc<dyn LNVpsDb>,
|
||||
rates: Arc<dyn ExchangeRateService>,
|
||||
tax_rates: HashMap<CountryCode, f32>,
|
||||
}
|
||||
|
||||
impl PricingEngine {
|
||||
/// SATS per BTC
|
||||
const BTC_SATS: f64 = 100_000_000.0;
|
||||
const KB: u64 = 1024;
|
||||
const MB: u64 = Self::KB * 1024;
|
||||
const GB: u64 = Self::MB * 1024;
|
||||
|
||||
pub fn new(db: Arc<dyn LNVpsDb>, rates: Arc<dyn ExchangeRateService>) -> Self {
|
||||
Self { db, rates }
|
||||
pub fn new(
|
||||
db: Arc<dyn LNVpsDb>,
|
||||
rates: Arc<dyn ExchangeRateService>,
|
||||
tax_rates: HashMap<CountryCode, f32>,
|
||||
) -> Self {
|
||||
Self {
|
||||
db,
|
||||
rates,
|
||||
tax_rates,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get VM cost (for renewal)
|
||||
pub async fn get_vm_cost(&self, vm_id: u64) -> Result<CostResult> {
|
||||
pub async fn get_vm_cost(&self, vm_id: u64, method: PaymentMethod) -> Result<CostResult> {
|
||||
let vm = self.db.get_vm(vm_id).await?;
|
||||
|
||||
// Reuse existing payment until expired
|
||||
let payments = self.db.list_vm_payment(vm.id).await?;
|
||||
if let Some(px) = payments
|
||||
.into_iter()
|
||||
.find(|p| p.expires > Utc::now() && !p.is_paid)
|
||||
.find(|p| p.expires > Utc::now() && !p.is_paid && p.payment_method == method)
|
||||
{
|
||||
return Ok(CostResult::Existing(px));
|
||||
}
|
||||
|
||||
if vm.template_id.is_some() {
|
||||
Ok(self.get_template_vm_cost(&vm).await?)
|
||||
Ok(self.get_template_vm_cost(&vm, method).await?)
|
||||
} else {
|
||||
Ok(self.get_custom_vm_cost(&vm).await?)
|
||||
Ok(self.get_custom_vm_cost(&vm, method).await?)
|
||||
}
|
||||
}
|
||||
|
||||
@ -80,9 +87,9 @@ impl PricingEngine {
|
||||
} else {
|
||||
bail!("No disk price found")
|
||||
};
|
||||
let disk_cost = (template.disk_size / Self::GB) as f32 * disk_pricing.cost;
|
||||
let disk_cost = (template.disk_size / crate::GB) as f32 * disk_pricing.cost;
|
||||
let cpu_cost = pricing.cpu_cost * template.cpu as f32;
|
||||
let memory_cost = pricing.memory_cost * (template.memory / Self::GB) as f32;
|
||||
let memory_cost = pricing.memory_cost * (template.memory / crate::GB) as f32;
|
||||
let ip4_cost = pricing.ip4_cost * v4s as f32;
|
||||
let ip6_cost = pricing.ip6_cost * v6s as f32;
|
||||
|
||||
@ -101,7 +108,7 @@ impl PricingEngine {
|
||||
})
|
||||
}
|
||||
|
||||
async fn get_custom_vm_cost(&self, vm: &Vm) -> Result<CostResult> {
|
||||
async fn get_custom_vm_cost(&self, vm: &Vm, method: PaymentMethod) -> Result<CostResult> {
|
||||
let template_id = if let Some(i) = vm.custom_template_id {
|
||||
i
|
||||
} else {
|
||||
@ -114,26 +121,49 @@ impl PricingEngine {
|
||||
|
||||
// custom templates are always 1-month intervals
|
||||
let time_value = (vm.expires.add(Months::new(1)) - vm.expires).num_seconds() as u64;
|
||||
let (cost_msats, rate) = self.get_msats_amount(price.currency, price.total()).await?;
|
||||
let (currency, amount, rate) = self
|
||||
.get_amount_and_rate(
|
||||
CurrencyAmount::from_f32(price.currency, price.total()),
|
||||
method,
|
||||
)
|
||||
.await?;
|
||||
Ok(CostResult::New {
|
||||
msats: cost_msats,
|
||||
amount,
|
||||
tax: self.get_tax_for_user(vm.user_id, amount).await?,
|
||||
currency,
|
||||
rate,
|
||||
time_value,
|
||||
new_expiry: vm.expires.add(TimeDelta::seconds(time_value as i64)),
|
||||
})
|
||||
}
|
||||
|
||||
async fn get_msats_amount(&self, currency: Currency, amount: f32) -> Result<(u64, f32)> {
|
||||
async fn get_tax_for_user(&self, user_id: u64, amount: u64) -> Result<u64> {
|
||||
let user = self.db.get_user(user_id).await?;
|
||||
if let Some(cc) = user
|
||||
.country_code
|
||||
.and_then(|c| CountryCode::for_alpha3(&c).ok())
|
||||
{
|
||||
if let Some(c) = self.tax_rates.get(&cc) {
|
||||
return Ok((amount as f64 * (*c as f64 / 100f64)).floor() as u64);
|
||||
}
|
||||
}
|
||||
Ok(0)
|
||||
}
|
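// A minimal sketch (not part of the diff) of the tax arithmetic above,
// i.e. floor(amount * rate / 100). With the 23% rate used in the tests and an
// amount of 1_000_000 smallest currency units, 230_000 would be charged as tax.
fn example_tax(amount: u64, rate_percent: f32) -> u64 {
    (amount as f64 * (rate_percent as f64 / 100f64)).floor() as u64
}
// example_tax(1_000_000, 23.0) == 230_000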
||||
|
||||
async fn get_ticker(&self, currency: Currency) -> Result<TickerRate> {
|
||||
let ticker = Ticker(Currency::BTC, currency);
|
||||
let rate = if let Some(r) = self.rates.get_rate(ticker).await {
|
||||
r
|
||||
if let Some(r) = self.rates.get_rate(ticker).await {
|
||||
Ok(TickerRate(ticker, r))
|
||||
} else {
|
||||
bail!("No exchange rate found")
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
let cost_btc = amount / rate;
|
||||
let cost_msats = (cost_btc as f64 * Self::BTC_SATS) as u64 * 1000;
|
||||
Ok((cost_msats, rate))
|
||||
async fn get_msats_amount(&self, amount: CurrencyAmount) -> Result<(u64, f32)> {
|
||||
let rate = self.get_ticker(amount.0).await?;
|
||||
let cost_btc = amount.value_f32() / rate.1;
|
||||
let cost_msats = (cost_btc as f64 * crate::BTC_SATS) as u64 * 1000;
|
||||
Ok((cost_msats, rate.1))
|
||||
}
|
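// A minimal sketch (not part of the diff) of the fiat-to-msat conversion above with
// concrete numbers: at 100_000 EUR/BTC a 2.50 EUR price is 0.000025 BTC, i.e. roughly
// 2_500 sats or 2_500_000 msats. The truncating casts mirror the implementation, so
// the result can land one step below the exact value. (crate::BTC_SATS is assumed to
// be 100_000_000.0, sats per bitcoin.)
fn example_msats(amount_fiat: f32, rate: f32) -> u64 {
    const BTC_SATS: f64 = 100_000_000.0;
    let cost_btc = amount_fiat / rate;
    (cost_btc as f64 * BTC_SATS) as u64 * 1000
}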
||||
|
||||
fn next_template_expire(vm: &Vm, cost_plan: &VmCostPlan) -> u64 {
|
||||
@ -150,7 +180,7 @@ impl PricingEngine {
|
||||
(next_expire - vm.expires).num_seconds() as u64
|
||||
}
|
||||
|
||||
async fn get_template_vm_cost(&self, vm: &Vm) -> Result<CostResult> {
|
||||
async fn get_template_vm_cost(&self, vm: &Vm, method: PaymentMethod) -> Result<CostResult> {
|
||||
let template_id = if let Some(i) = vm.template_id {
|
||||
i
|
||||
} else {
|
||||
@ -159,20 +189,37 @@ impl PricingEngine {
|
||||
let template = self.db.get_vm_template(template_id).await?;
|
||||
let cost_plan = self.db.get_cost_plan(template.cost_plan_id).await?;
|
||||
|
||||
let (cost_msats, rate) = self
|
||||
.get_msats_amount(
|
||||
cost_plan.currency.parse().expect("Invalid currency"),
|
||||
cost_plan.amount,
|
||||
)
|
||||
let currency = cost_plan.currency.parse().expect("Invalid currency");
|
||||
let (currency, amount, rate) = self
|
||||
.get_amount_and_rate(CurrencyAmount::from_f32(currency, cost_plan.amount), method)
|
||||
.await?;
|
||||
let time_value = Self::next_template_expire(&vm, &cost_plan);
|
||||
let time_value = Self::next_template_expire(vm, &cost_plan);
|
||||
Ok(CostResult::New {
|
||||
msats: cost_msats,
|
||||
amount,
|
||||
tax: self.get_tax_for_user(vm.user_id, amount).await?,
|
||||
currency,
|
||||
rate,
|
||||
time_value,
|
||||
new_expiry: vm.expires.add(TimeDelta::seconds(time_value as i64)),
|
||||
})
|
||||
}
|
||||
|
||||
async fn get_amount_and_rate(
|
||||
&self,
|
||||
list_price: CurrencyAmount,
|
||||
method: PaymentMethod,
|
||||
) -> Result<(Currency, u64, f32)> {
|
||||
Ok(match (list_price.0, method) {
|
||||
(c, PaymentMethod::Lightning) if c != Currency::BTC => {
|
||||
let new_price = self.get_msats_amount(list_price).await?;
|
||||
(Currency::BTC, new_price.0, new_price.1)
|
||||
}
|
||||
(cur, PaymentMethod::Revolut) if cur != Currency::BTC => {
|
||||
(cur, list_price.value(), 0.01)
|
||||
}
|
||||
(c, m) => bail!("Cannot create payment for method {} and currency {}", m, c),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
@ -181,14 +228,18 @@ pub enum CostResult {
|
||||
Existing(VmPayment),
|
||||
/// A new payment can be created with the specified amount
|
||||
New {
|
||||
/// The cost in milli-sats
|
||||
msats: u64,
|
||||
/// The cost
|
||||
amount: u64,
|
||||
/// Currency
|
||||
currency: Currency,
|
||||
/// The exchange rate used to calculate the price
|
||||
rate: f32,
|
||||
/// The time to extend the vm expiry in seconds
|
||||
time_value: u64,
|
||||
/// The absolute expiry time of the vm if renewed
|
||||
new_expiry: DateTime<Utc>,
|
||||
/// Taxes to charge
|
||||
tax: u64,
|
||||
},
|
||||
}
|
||||
|
||||
@ -212,8 +263,7 @@ impl PricingData {
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::mocks::{MockDb, MockExchangeRate};
|
||||
use lnvps_db::{DiskType, VmCustomPricing, VmCustomPricingDisk, VmCustomTemplate};
|
||||
const GB: u64 = 1024 * 1024 * 1024;
|
||||
use lnvps_db::{DiskType, User, VmCustomPricing, VmCustomPricingDisk, VmCustomTemplate};
|
||||
const MOCK_RATE: f32 = 100_000.0;
|
||||
|
||||
async fn add_custom_pricing(db: &MockDb) {
|
||||
@ -240,8 +290,8 @@ mod tests {
|
||||
VmCustomTemplate {
|
||||
id: 1,
|
||||
cpu: 2,
|
||||
memory: 2 * GB,
|
||||
disk_size: 80 * GB,
|
||||
memory: 2 * crate::GB,
|
||||
disk_size: 80 * crate::GB,
|
||||
disk_type: DiskType::SSD,
|
||||
disk_interface: Default::default(),
|
||||
pricing_id: 1,
|
||||
@ -287,19 +337,67 @@ mod tests {
|
||||
{
|
||||
let mut v = db.vms.lock().await;
|
||||
v.insert(1, MockDb::mock_vm());
|
||||
v.insert(
|
||||
2,
|
||||
Vm {
|
||||
user_id: 2,
|
||||
..MockDb::mock_vm()
|
||||
},
|
||||
);
|
||||
|
||||
let mut u = db.users.lock().await;
|
||||
u.insert(
|
||||
1,
|
||||
User {
|
||||
id: 1,
|
||||
pubkey: vec![],
|
||||
created: Default::default(),
|
||||
email: None,
|
||||
contact_nip17: false,
|
||||
contact_email: false,
|
||||
country_code: Some("USA".to_string()),
|
||||
},
|
||||
);
|
||||
u.insert(
|
||||
2,
|
||||
User {
|
||||
id: 2,
|
||||
pubkey: vec![],
|
||||
created: Default::default(),
|
||||
email: None,
|
||||
contact_nip17: false,
|
||||
contact_email: false,
|
||||
country_code: Some("IRL".to_string()),
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
let db: Arc<dyn LNVpsDb> = Arc::new(db);
|
||||
|
||||
let pe = PricingEngine::new(db.clone(), rates);
|
||||
let price = pe.get_vm_cost(1).await?;
|
||||
let taxes = HashMap::from([(CountryCode::IRL, 23.0)]);
|
||||
|
||||
let pe = PricingEngine::new(db.clone(), rates, taxes);
|
||||
let plan = MockDb::mock_cost_plan();
|
||||
|
||||
let price = pe.get_vm_cost(1, PaymentMethod::Lightning).await?;
|
||||
match price {
|
||||
CostResult::Existing(_) => bail!("??"),
|
||||
CostResult::New { msats, .. } => {
|
||||
CostResult::New { amount, tax, .. } => {
|
||||
let expect_price = (plan.amount / MOCK_RATE * 1.0e11) as u64;
|
||||
assert_eq!(expect_price, msats);
|
||||
assert_eq!(expect_price, amount);
|
||||
assert_eq!(0, tax);
|
||||
}
|
||||
_ => bail!("??"),
|
||||
}
|
||||
|
||||
// with taxes
|
||||
let price = pe.get_vm_cost(2, PaymentMethod::Lightning).await?;
|
||||
match price {
|
||||
CostResult::New { amount, tax, .. } => {
|
||||
let expect_price = (plan.amount / MOCK_RATE * 1.0e11) as u64;
|
||||
assert_eq!(expect_price, amount);
|
||||
assert_eq!((expect_price as f64 * 0.23).floor() as u64, tax);
|
||||
}
|
||||
_ => bail!("??"),
|
||||
}
|
||||
|
||||
Ok(())
|
||||
|
@ -19,13 +19,18 @@ impl MikrotikRouter {
|
||||
STANDARD.encode(format!("{}:{}", username, password))
|
||||
);
|
||||
Self {
|
||||
api: JsonApi::token(url, &auth).unwrap(),
|
||||
api: JsonApi::token(url, &auth, true).unwrap(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Router for MikrotikRouter {
|
||||
async fn generate_mac(&self, _ip: &str, _comment: &str) -> Result<Option<ArpEntry>> {
|
||||
// Mikrotik router doesn't care what MAC address you use
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
async fn list_arp_entry(&self) -> Result<Vec<ArpEntry>> {
|
||||
let rsp: Vec<MikrotikArpEntry> = self.api.req(Method::GET, "/rest/ip/arp", ()).await?;
|
||||
Ok(rsp.into_iter().map(|e| e.into()).collect())
|
||||
@ -64,7 +69,7 @@ impl Router for MikrotikRouter {
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct MikrotikArpEntry {
|
||||
struct MikrotikArpEntry {
|
||||
#[serde(rename = ".id")]
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub id: Option<String>,
|
||||
|
@ -11,6 +11,8 @@ use rocket::async_trait;
|
||||
/// It also prevents people from re-assigning their IP to another in the range,
|
||||
#[async_trait]
|
||||
pub trait Router: Send + Sync {
|
||||
/// Generate mac address for a given IP address
|
||||
async fn generate_mac(&self, ip: &str, comment: &str) -> Result<Option<ArpEntry>>;
|
||||
async fn list_arp_entry(&self) -> Result<Vec<ArpEntry>>;
|
||||
async fn add_arp_entry(&self, entry: &ArpEntry) -> Result<ArpEntry>;
|
||||
async fn remove_arp_entry(&self, id: &str) -> Result<()>;
|
||||
@ -40,5 +42,9 @@ impl ArpEntry {
|
||||
|
||||
#[cfg(feature = "mikrotik")]
|
||||
mod mikrotik;
|
||||
mod ovh;
|
||||
|
||||
#[cfg(feature = "mikrotik")]
|
||||
pub use mikrotik::*;
|
||||
pub use mikrotik::MikrotikRouter;
|
||||
pub use ovh::OvhDedicatedServerVMacRouter;
|
||||
|
||||
|
355
src/router/ovh.rs
Normal file
@ -0,0 +1,355 @@
|
||||
use crate::json_api::{JsonApi, TokenGen};
|
||||
use crate::router::{ArpEntry, Router};
|
||||
use anyhow::{anyhow, bail, Context, Result};
|
||||
use chrono::{DateTime, Utc};
|
||||
use lnvps_db::async_trait;
|
||||
use log::{info, warn};
|
||||
use nostr::hashes::{sha1, Hash};
|
||||
use nostr::Url;
|
||||
use reqwest::header::{HeaderName, HeaderValue, ACCEPT};
|
||||
use reqwest::{Method, RequestBuilder};
|
||||
use rocket::form::validate::Contains;
|
||||
use rocket::serde::Deserialize;
|
||||
use serde::Serialize;
|
||||
use std::ops::Sub;
|
||||
use std::str::FromStr;
|
||||
use std::sync::atomic::AtomicI64;
|
||||
use std::sync::Arc;
|
||||
|
||||
/// This router is not really a router, but it allows
|
||||
/// managing the virtual MACs for additional IPs on OVH dedicated servers
|
||||
pub struct OvhDedicatedServerVMacRouter {
|
||||
name: String,
|
||||
api: JsonApi,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
struct OvhTokenGen {
|
||||
time_delta: i64,
|
||||
application_key: String,
|
||||
application_secret: String,
|
||||
consumer_key: String,
|
||||
}
|
||||
|
||||
impl OvhTokenGen {
|
||||
pub fn new(time_delta: i64, token: &str) -> Result<Self> {
|
||||
let mut t_split = token.split(":");
|
||||
Ok(Self {
|
||||
time_delta,
|
||||
application_key: t_split
|
||||
.next()
|
||||
.context("Missing application_key")?
|
||||
.to_string(),
|
||||
application_secret: t_split
|
||||
.next()
|
||||
.context("Missing application_secret")?
|
||||
.to_string(),
|
||||
consumer_key: t_split.next().context("Missing consumer_key")?.to_string(),
|
||||
})
|
||||
}
|
||||
|
||||
/// Compute signature for OVH.
|
||||
fn build_sig(
|
||||
method: &str,
|
||||
query: &str,
|
||||
body: &str,
|
||||
timestamp: &str,
|
||||
aas: &str,
|
||||
ck: &str,
|
||||
) -> String {
|
||||
let sep = "+";
|
||||
let prefix = "$1$".to_string();
|
||||
|
||||
let capacity = 1
|
||||
+ aas.len()
|
||||
+ sep.len()
|
||||
+ ck.len()
|
||||
+ method.len()
|
||||
+ sep.len()
|
||||
+ query.len()
|
||||
+ sep.len()
|
||||
+ body.len()
|
||||
+ sep.len()
|
||||
+ timestamp.len();
|
||||
let mut signature = String::with_capacity(capacity);
|
||||
signature.push_str(aas);
|
||||
signature.push_str(sep);
|
||||
signature.push_str(ck);
|
||||
signature.push_str(sep);
|
||||
signature.push_str(method);
|
||||
signature.push_str(sep);
|
||||
signature.push_str(query);
|
||||
signature.push_str(sep);
|
||||
signature.push_str(body);
|
||||
signature.push_str(sep);
|
||||
signature.push_str(timestamp);
|
||||
|
||||
// debug!("Signature: {}", &signature);
|
||||
let sha1: sha1::Hash = Hash::hash(signature.as_bytes());
|
||||
let sig = hex::encode(sha1);
|
||||
prefix + &sig
|
||||
}
|
||||
}
|
||||
|
||||
impl TokenGen for OvhTokenGen {
|
||||
fn generate_token(
|
||||
&self,
|
||||
method: Method,
|
||||
url: &Url,
|
||||
body: Option<&str>,
|
||||
req: RequestBuilder,
|
||||
) -> Result<RequestBuilder> {
|
||||
let now = Utc::now().timestamp().sub(self.time_delta);
|
||||
let now_string = now.to_string();
|
||||
let sig = Self::build_sig(
|
||||
method.as_str(),
|
||||
url.as_str(),
|
||||
body.unwrap_or(""),
|
||||
now_string.as_str(),
|
||||
&self.application_secret,
|
||||
&self.consumer_key,
|
||||
);
|
||||
Ok(req
|
||||
.header("X-Ovh-Application", &self.application_key)
|
||||
.header("X-Ovh-Consumer", &self.consumer_key)
|
||||
.header("X-Ovh-Timestamp", now_string)
|
||||
.header("X-Ovh-Signature", sig))
|
||||
}
|
||||
}
|
||||
|
||||
impl OvhDedicatedServerVMacRouter {
|
||||
pub async fn new(url: &str, name: &str, token: &str) -> Result<Self> {
|
||||
// load API time delta
|
||||
let time_api = JsonApi::new(url)?;
|
||||
let time = time_api.get_raw("v1/auth/time").await?;
|
||||
let delta: i64 = Utc::now().timestamp().sub(time.parse::<i64>()?);
|
||||
|
||||
Ok(Self {
|
||||
name: name.to_string(),
|
||||
api: JsonApi::token_gen(url, false, OvhTokenGen::new(delta, token)?)?,
|
||||
})
|
||||
}
|
||||
|
||||
async fn get_task(&self, task_id: i64) -> Result<OvhTaskResponse> {
|
||||
self.api
|
||||
.get(&format!(
|
||||
"v1/dedicated/server/{}/task/{}",
|
||||
self.name, task_id
|
||||
))
|
||||
.await
|
||||
}
|
||||
|
||||
/// Poll a task until it completes
|
||||
async fn wait_for_task_result(&self, task_id: i64) -> Result<OvhTaskResponse> {
|
||||
loop {
|
||||
let status = self.get_task(task_id).await?;
|
||||
match status.status {
|
||||
OvhTaskStatus::Cancelled => {
|
||||
return Err(anyhow!(
|
||||
"Task was cancelled: {}",
|
||||
status.comment.unwrap_or_default()
|
||||
))
|
||||
}
|
||||
OvhTaskStatus::CustomerError => {
|
||||
return Err(anyhow!(
|
||||
"Task failed: {}",
|
||||
status.comment.unwrap_or_default()
|
||||
))
|
||||
}
|
||||
OvhTaskStatus::Done => return Ok(status),
|
||||
OvhTaskStatus::OvhError => {
|
||||
return Err(anyhow!(
|
||||
"Task failed: {}",
|
||||
status.comment.unwrap_or_default()
|
||||
))
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Router for OvhDedicatedServerVMacRouter {
|
||||
async fn generate_mac(&self, ip: &str, comment: &str) -> Result<Option<ArpEntry>> {
|
||||
info!("[OVH] Generating mac: {}={}", ip, comment);
|
||||
let rsp: OvhTaskResponse = self
|
||||
.api
|
||||
.post(
|
||||
&format!("v1/dedicated/server/{}/virtualMac", &self.name),
|
||||
OvhVMacRequest {
|
||||
ip_address: ip.to_string(),
|
||||
kind: OvhVMacType::Ovh,
|
||||
name: comment.to_string(),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
self.wait_for_task_result(rsp.task_id).await?;
|
||||
|
||||
// api is shit, lookup ip address in list of arp entries
|
||||
let e = self.list_arp_entry().await?;
|
||||
Ok(e.into_iter().find(|e| e.address == ip))
|
||||
}
|
||||
|
||||
async fn list_arp_entry(&self) -> Result<Vec<ArpEntry>> {
|
||||
let rsp: Vec<String> = self
|
||||
.api
|
||||
.get(&format!("v1/dedicated/server/{}/virtualMac", &self.name))
|
||||
.await?;
|
||||
|
||||
let mut ret = vec![];
|
||||
for mac in rsp {
|
||||
let rsp2: Vec<String> = self
|
||||
.api
|
||||
.get(&format!(
|
||||
"v1/dedicated/server/{}/virtualMac/{}/virtualAddress",
|
||||
&self.name, mac
|
||||
))
|
||||
.await?;
|
||||
|
||||
for addr in rsp2 {
|
||||
ret.push(ArpEntry {
|
||||
id: Some(format!("{}={}", mac, &addr)),
|
||||
address: addr,
|
||||
mac_address: mac.clone(),
|
||||
interface: None,
|
||||
comment: None,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
Ok(ret)
|
||||
}
|
||||
|
||||
async fn add_arp_entry(&self, entry: &ArpEntry) -> Result<ArpEntry> {
|
||||
info!(
|
||||
"[OVH] Adding mac ip: {} {}",
|
||||
entry.mac_address, entry.address
|
||||
);
|
||||
#[derive(Serialize)]
|
||||
struct AddVMacAddressRequest {
|
||||
#[serde(rename = "ipAddress")]
|
||||
pub ip_address: String,
|
||||
#[serde(rename = "virtualMachineName")]
|
||||
pub comment: String,
|
||||
}
|
||||
let id = format!("{}={}", &entry.mac_address, &entry.address);
|
||||
let task: OvhTaskResponse = self
|
||||
.api
|
||||
.post(
|
||||
&format!(
|
||||
"v1/dedicated/server/{}/virtualMac/{}/virtualAddress",
|
||||
&self.name, &entry.mac_address
|
||||
),
|
||||
AddVMacAddressRequest {
|
||||
ip_address: entry.address.clone(),
|
||||
comment: entry.comment.clone().unwrap_or(String::new()),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
self.wait_for_task_result(task.task_id).await?;
|
||||
|
||||
Ok(ArpEntry {
|
||||
id: Some(id),
|
||||
address: entry.address.clone(),
|
||||
mac_address: entry.mac_address.clone(),
|
||||
interface: None,
|
||||
comment: None,
|
||||
})
|
||||
}
|
||||
|
||||
async fn remove_arp_entry(&self, id: &str) -> Result<()> {
|
||||
let entries = self.list_arp_entry().await?;
|
||||
if let Some(this_entry) = entries.into_iter().find(|e| e.id == Some(id.to_string())) {
|
||||
info!(
|
||||
"[OVH] Deleting mac ip: {} {}",
|
||||
this_entry.mac_address, this_entry.address
|
||||
);
|
||||
let task: OvhTaskResponse = self
|
||||
.api
|
||||
.req(
|
||||
Method::DELETE,
|
||||
&format!(
|
||||
"v1/dedicated/server/{}/virtualMac/{}/virtualAddress/{}",
|
||||
self.name, this_entry.mac_address, this_entry.address
|
||||
),
|
||||
(),
|
||||
)
|
||||
.await?;
|
||||
self.wait_for_task_result(task.task_id).await?;
|
||||
Ok(())
|
||||
} else {
|
||||
bail!("Cannot remove arp entry, not found")
|
||||
}
|
||||
}
|
||||
|
||||
async fn update_arp_entry(&self, entry: &ArpEntry) -> Result<ArpEntry> {
|
||||
// cant patch just return the entry
|
||||
warn!("[OVH] Updating virtual mac is not supported");
|
||||
Ok(entry.clone())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize)]
|
||||
struct OvhVMacRequest {
|
||||
#[serde(rename = "ipAddress")]
|
||||
pub ip_address: String,
|
||||
#[serde(rename = "type")]
|
||||
pub kind: OvhVMacType,
|
||||
#[serde(rename = "virtualMachineName")]
|
||||
pub name: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
enum OvhVMacType {
|
||||
Ovh,
|
||||
VMWare,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct OvhTaskResponse {
|
||||
pub comment: Option<String>,
|
||||
pub done_date: Option<DateTime<Utc>>,
|
||||
pub function: OvhTaskFunction,
|
||||
pub last_update: Option<DateTime<Utc>>,
|
||||
pub need_schedule: bool,
|
||||
pub note: Option<String>,
|
||||
pub planned_intervention_id: Option<i64>,
|
||||
pub start_date: DateTime<Utc>,
|
||||
pub status: OvhTaskStatus,
|
||||
pub tags: Option<Vec<KVSimple>>,
|
||||
pub task_id: i64,
|
||||
pub ticket_reference: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
struct KVSimple {
|
||||
pub key: Option<String>,
|
||||
pub value: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
enum OvhTaskStatus {
|
||||
Cancelled,
|
||||
CustomerError,
|
||||
Doing,
|
||||
Done,
|
||||
Init,
|
||||
OvhError,
|
||||
Todo,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
enum OvhTaskFunction {
|
||||
AddVirtualMac,
|
||||
MoveVirtualMac,
|
||||
VirtualMacAdd,
|
||||
VirtualMacDelete,
|
||||
RemoveVirtualMac
|
||||
}
|
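A note on the signing scheme `build_sig` implements above: OVH expects the `X-Ovh-Signature` header to be `$1$` followed by the hex SHA1 of `application_secret + consumer_key + method + url + body + timestamp`, joined with literal `+` characters. A minimal standalone sketch of that computation follows; it is not part of this commit and assumes the `sha1` crate in place of `nostr::hashes` (the `hex` crate is already a dependency).

```rust
// Sketch only, not part of this commit: reproduces the "$1$" + SHA1 signature
// that OvhTokenGen::build_sig computes, using the `sha1` crate (assumed dependency).
use sha1::{Digest, Sha1};

fn ovh_signature(
    application_secret: &str,
    consumer_key: &str,
    method: &str,
    url: &str,
    body: &str,
    timestamp: &str,
) -> String {
    // OVH signs AS+CK+METHOD+QUERY+BODY+TSTAMP joined with literal '+' characters.
    let payload = format!(
        "{}+{}+{}+{}+{}+{}",
        application_secret, consumer_key, method, url, body, timestamp
    );
    format!("$1${}", hex::encode(Sha1::digest(payload.as_bytes())))
}

fn main() {
    // Hypothetical values; a real request uses the server-corrected timestamp,
    // which is why OvhTokenGen stores a time_delta from v1/auth/time.
    let sig = ovh_signature(
        "app-secret",
        "consumer-key",
        "GET",
        "https://eu.api.ovh.com/v1/auth/time",
        "",
        "1700000000",
    );
    println!("X-Ovh-Signature: {}", sig);
}
```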
167 src/settings.rs
@@ -1,20 +1,28 @@
use crate::dns::DnsServer;
use crate::exchange::ExchangeRateService;
use crate::fiat::FiatPaymentService;
use crate::lightning::LightningNode;
use crate::provisioner::LNVpsProvisioner;
use crate::router::Router;
use anyhow::Result;
use isocountry::CountryCode;
use lnvps_db::LNVpsDb;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc;

#[derive(Debug, Clone, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct Settings {
    /// Listen address for http server
    pub listen: Option<String>,

    /// MYSQL connection string
    pub db: String,

    /// Public URL mapping to this service
    pub public_url: String,

    /// Lightning node config for creating LN payments
    pub lightning: LightningConfig,

@@ -24,24 +32,24 @@ pub struct Settings {
    /// Provisioning profiles
    pub provisioner: ProvisionerConfig,

    /// Network policy
    #[serde(default)]
    pub network_policy: NetworkPolicy,

    /// Number of days after an expired VM is deleted
    pub delete_after: u16,

    /// SMTP settings for sending emails
    pub smtp: Option<SmtpConfig>,

    /// Network router config
    pub router: Option<RouterConfig>,

    /// DNS configurations for PTR records
    pub dns: Option<DnsServerConfig>,

    /// Nostr config for sending DMs
    pub nostr: Option<NostrConfig>,

    /// Config for accepting revolut payments
    pub revolut: Option<RevolutConfig>,

    #[serde(default)]
    /// Tax rates to change per country as a percent of the amount
    pub tax_rate: HashMap<CountryCode, f32>,
}

#[derive(Debug, Clone, Deserialize, Serialize)]
@@ -68,63 +76,28 @@ pub struct NostrConfig {

#[derive(Debug, Clone, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub enum RouterConfig {
    Mikrotik {
        url: String,
        username: String,
        password: String,
    },
pub struct DnsServerConfig {
    pub forward_zone_id: String,
    pub api: DnsServerApi,
}

#[derive(Debug, Clone, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub enum DnsServerConfig {
pub enum DnsServerApi {
    #[serde(rename_all = "kebab-case")]
    Cloudflare {
        token: String,
        forward_zone_id: String,
        reverse_zone_id: String,
    },
}

/// Policy that determines how packets arrive at the VM
#[derive(Debug, Clone, Deserialize, Serialize, Default)]
#[serde(rename_all = "kebab-case")]
pub enum NetworkAccessPolicy {
    /// No special procedure required for packets to arrive
    #[default]
    Auto,
    /// ARP entries are added statically on the access router
    StaticArp {
        /// Interface used to add arp entries
        interface: String,
    },
}

#[derive(Debug, Clone, Deserialize, Serialize, Default)]
#[serde(rename_all = "kebab-case")]
pub struct NetworkPolicy {
    /// Policy that determines how packets arrive at the VM
    pub access: NetworkAccessPolicy,

    /// Use SLAAC for IPv6 allocation
    pub ip6_slaac: Option<bool>,
    Cloudflare { token: String },
}

#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct SmtpConfig {
    /// Admin user id, for sending system notifications
    pub admin: Option<u64>,

    /// Email server host:port
    pub server: String,

    /// From header to use, otherwise empty
    pub from: Option<String>,

    /// Username for SMTP connection
    pub username: String,

    /// Password for SMTP connection
    pub password: String,
}
@@ -162,12 +135,19 @@ pub struct QemuConfig {
    pub bridge: String,
    /// CPU type
    pub cpu: String,
    /// VLAN tag all spawned VM's
    pub vlan: Option<u16>,
    /// Enable virtualization inside VM
    pub kvm: bool,
}

#[derive(Debug, Clone, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct RevolutConfig {
    pub url: Option<String>,
    pub api_version: String,
    pub token: String,
    pub public_key: String,
}

impl Settings {
    pub fn get_provisioner(
        &self,
@@ -178,32 +158,6 @@ impl Settings {
        Arc::new(LNVpsProvisioner::new(self.clone(), db, node, exchange))
    }

    pub fn get_router(&self) -> Result<Option<Arc<dyn Router>>> {
        #[cfg(test)]
        {
            if let Some(_router) = &self.router {
                let router = crate::mocks::MockRouter::new(self.network_policy.clone());
                Ok(Some(Arc::new(router)))
            } else {
                Ok(None)
            }
        }
        #[cfg(not(test))]
        {
            match &self.router {
                #[cfg(feature = "mikrotik")]
                Some(RouterConfig::Mikrotik {
                    url,
                    username,
                    password,
                }) => Ok(Some(Arc::new(crate::router::MikrotikRouter::new(
                    url, username, password,
                )))),
                _ => Ok(None),
            }
        }
    }

    pub fn get_dns(&self) -> Result<Option<Arc<dyn DnsServer>>> {
        #[cfg(test)]
        {
@@ -213,17 +167,58 @@ impl Settings {
        {
            match &self.dns {
                None => Ok(None),
                #[cfg(feature = "cloudflare")]
                Some(DnsServerConfig::Cloudflare {
                    token,
                    forward_zone_id,
                    reverse_zone_id,
                }) => Ok(Some(Arc::new(crate::dns::Cloudflare::new(
                    token,
                    reverse_zone_id,
                    forward_zone_id,
                )))),
                Some(c) => match &c.api {
                    #[cfg(feature = "cloudflare")]
                    DnsServerApi::Cloudflare { token } => {
                        Ok(Some(Arc::new(crate::dns::Cloudflare::new(token))))
                    }
                },
            }
        }
    }

    pub fn get_revolut(&self) -> Result<Option<Arc<dyn FiatPaymentService>>> {
        match &self.revolut {
            #[cfg(feature = "revolut")]
            Some(c) => Ok(Some(Arc::new(crate::fiat::RevolutApi::new(c.clone())?))),
            _ => Ok(None),
        }
    }
}

#[cfg(test)]
pub fn mock_settings() -> Settings {
    Settings {
        listen: None,
        db: "".to_string(),
        public_url: "http://localhost:8000".to_string(),
        lightning: LightningConfig::LND {
            url: "".to_string(),
            cert: Default::default(),
            macaroon: Default::default(),
        },
        read_only: false,
        provisioner: ProvisionerConfig::Proxmox {
            qemu: QemuConfig {
                machine: "q35".to_string(),
                os_type: "l26".to_string(),
                bridge: "vmbr1".to_string(),
                cpu: "kvm64".to_string(),
                kvm: false,
            },
            ssh: None,
            mac_prefix: Some("ff:ff:ff".to_string()),
        },
        delete_after: 0,
        smtp: None,
        dns: Some(DnsServerConfig {
            forward_zone_id: "mock-forward-zone-id".to_string(),
            api: DnsServerApi::Cloudflare {
                token: "abc".to_string(),
            },
        }),
        nostr: None,
        revolut: None,
        tax_rate: HashMap::from([(CountryCode::IRL, 23.0), (CountryCode::USA, 1.0)]),
    }
}
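The DNS settings change above moves `forward-zone-id` out of the Cloudflare variant into a `DnsServerConfig` wrapper with a nested `api` section. A small sketch of how that new kebab-case shape serializes; it is not part of this commit, the struct layout is copied from the diff, and `serde_json` (already a dependency) is used purely to show the resulting keys.

```rust
// Sketch only, not part of this commit: mirrors the new DnsServerConfig shape
// from the diff above to show the kebab-case keys a config file would use.
use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
struct DnsServerConfig {
    forward_zone_id: String,
    api: DnsServerApi,
}

#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
enum DnsServerApi {
    #[serde(rename_all = "kebab-case")]
    Cloudflare { token: String },
}

fn main() -> anyhow::Result<()> {
    // "zone123" and "cf-token" are placeholder values.
    let cfg = DnsServerConfig {
        forward_zone_id: "zone123".to_string(),
        api: DnsServerApi::Cloudflare {
            token: "cf-token".to_string(),
        },
    };
    // Prints: {"forward-zone-id":"zone123","api":{"cloudflare":{"token":"cf-token"}}}
    println!("{}", serde_json::to_string(&cfg)?);
    Ok(())
}
```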
@@ -1,8 +1,8 @@
use anyhow::Result;
use anyhow::{anyhow, Result};
use log::info;
use ssh2::Channel;
use std::io::Read;
use std::path::PathBuf;
use std::path::{Path, PathBuf};
use tokio::net::{TcpStream, ToSocketAddrs};

pub struct SshClient {
@@ -34,6 +34,12 @@ impl SshClient {
        Ok(channel)
    }

    pub fn tunnel_unix_socket(&mut self, remote_path: &Path) -> Result<Channel> {
        self.session
            .channel_direct_streamlocal(remote_path.to_str().unwrap(), None)
            .map_err(|e| anyhow!(e))
    }

    pub async fn execute(&mut self, command: &str) -> Result<(i32, String)> {
        info!("Executing command: {}", command);
        let mut channel = self.session.channel_session()?;

127 src/worker.rs
@@ -2,13 +2,13 @@ use crate::host::get_host_client;
use crate::provisioner::LNVpsProvisioner;
use crate::settings::{ProvisionerConfig, Settings, SmtpConfig};
use crate::status::{VmRunningState, VmState, VmStateCache};
use anyhow::Result;
use anyhow::{bail, Result};
use chrono::{DateTime, Datelike, Days, Utc};
use lettre::message::{MessageBuilder, MultiPart};
use lettre::transport::smtp::authentication::Credentials;
use lettre::AsyncTransport;
use lettre::{AsyncSmtpTransport, Tokio1Executor};
use lnvps_db::{LNVpsDb, Vm};
use lnvps_db::{LNVpsDb, Vm, VmHost};
use log::{debug, error, info, warn};
use nostr::{EventBuilder, PublicKey, ToBech32};
use nostr_sdk::Client;
@@ -18,6 +18,8 @@ use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};

#[derive(Debug)]
pub enum WorkJob {
    /// Sync resources from hosts to database
    PatchHosts,
    /// Check all running VMS
    CheckVms,
    /// Check the VM status matches database state
@@ -251,7 +253,7 @@ impl Worker {
                        None,
                    )
                    .await?;
                c.send_event(ev).await?;
                c.send_event(&ev).await?;
            }
        }
        Ok(())
@@ -278,46 +280,97 @@ impl Worker {
        Ok(())
    }

    pub async fn handle(&mut self) -> Result<()> {
        while let Some(job) = self.rx.recv().await {
            match &job {
                WorkJob::CheckVm { vm_id } => {
                    let vm = self.db.get_vm(*vm_id).await?;
                    if let Err(e) = self.check_vm(&vm).await {
                        error!("Failed to check VM {}: {}", vm_id, e);
                        self.queue_admin_notification(
                            format!("Failed to check VM {}:\n{:?}\n{}", vm_id, &job, e),
                            Some("Job Failed".to_string()),
                        )?
    async fn patch_host(&self, host: &mut VmHost) -> Result<()> {
        let client = match get_host_client(host, &self.settings.provisioner_config) {
            Ok(h) => h,
            Err(e) => bail!("Failed to get host client: {} {}", host.name, e),
        };
        let info = client.get_info().await?;
        let needs_update = info.cpu != host.cpu || info.memory != host.memory;
        if needs_update {
            host.cpu = info.cpu;
            host.memory = info.memory;
            self.db.update_host(host).await?;
            info!(
                "Updated host {}: cpu={}, memory={}",
                host.name, host.cpu, host.memory
            );
        }

        let mut host_disks = self.db.list_host_disks(host.id).await?;
        for disk in &info.disks {
            if let Some(mut hd) = host_disks.iter_mut().find(|d| d.name == disk.name) {
                if hd.size != disk.size {
                    hd.size = disk.size;
                    self.db.update_host_disk(hd).await?;
                    info!(
                        "Updated host disk {}: size={},type={},interface={}",
                        hd.name, hd.size, hd.kind, hd.interface
                    );
                }
            } else {
                warn!("Un-mapped host disk {}", disk.name);
            }
        }
        Ok(())
    }

    async fn try_job(&mut self, job: &WorkJob) -> Result<()> {
        match job {
            WorkJob::PatchHosts => {
                let mut hosts = self.db.list_hosts().await?;
                for mut host in &mut hosts {
                    info!("Patching host {}", host.name);
                    if let Err(e) = self.patch_host(&mut host).await {
                        error!("Failed to patch host {}: {}", host.name, e);
                    }
                }
            WorkJob::SendNotification {
                user_id,
                message,
                title,
            } => {
                if let Err(e) = self
                    .send_notification(*user_id, message.clone(), title.clone())
                    .await
                {
                    error!("Failed to send notification {}: {}", user_id, e);
                    self.queue_admin_notification(
                        format!("Failed to send notification:\n{:?}\n{}", &job, e),
                        Some("Job Failed".to_string()),
                    )?
                }
            }
            WorkJob::CheckVm { vm_id } => {
                let vm = self.db.get_vm(*vm_id).await?;
                if let Err(e) = self.check_vm(&vm).await {
                    error!("Failed to check VM {}: {}", vm_id, e);
                    self.queue_admin_notification(
                        format!("Failed to check VM {}:\n{:?}\n{}", vm_id, &job, e),
                        Some("Job Failed".to_string()),
                    )?
                }
            WorkJob::CheckVms => {
                if let Err(e) = self.check_vms().await {
                    error!("Failed to check VMs: {}", e);
                    self.queue_admin_notification(
                        format!("Failed to check VM's:\n{:?}\n{}", &job, e),
                        Some("Job Failed".to_string()),
                    )?
                }
            }
            WorkJob::SendNotification {
                user_id,
                message,
                title,
            } => {
                if let Err(e) = self
                    .send_notification(*user_id, message.clone(), title.clone())
                    .await
                {
                    error!("Failed to send notification {}: {}", user_id, e);
                    self.queue_admin_notification(
                        format!("Failed to send notification:\n{:?}\n{}", &job, e),
                        Some("Job Failed".to_string()),
                    )?
                }
            }
            WorkJob::CheckVms => {
                if let Err(e) = self.check_vms().await {
                    error!("Failed to check VMs: {}", e);
                    self.queue_admin_notification(
                        format!("Failed to check VM's:\n{:?}\n{}", &job, e),
                        Some("Job Failed".to_string()),
                    )?
                }
            }
        }
        Ok(())
    }

    pub async fn handle(&mut self) -> Result<()> {
        while let Some(job) = self.rx.recv().await {
            if let Err(e) = self.try_job(&job).await {
                error!("Job failed to execute: {:?} {}", job, e);
            }
        }
        Ok(())
    }
}