Compare commits
1 commit
master...3e6232ad91
.drone.yml (29 lines changed)
@@ -5,14 +5,13 @@ name: default

steps:
- name: web_build
image: node:23
image: node:22
volumes:
- name: web_app
path: /tmp/web_build
commands:
- cd virtweb_frontend
- npm install
- npm run lint
- npm run build
- mv dist /tmp/web_build

@@ -26,7 +25,6 @@ steps:
- rustup component add clippy
- cd virtweb_backend
- cargo clippy -- -D warnings
- cargo clippy --examples -- -D warnings
- cargo test

- name: backend_compile
@@ -36,8 +34,6 @@ steps:
path: /usr/local/cargo/registry
- name: web_app
path: /tmp/web_build
- name: release
path: /tmp/release
depends_on:
- backend_check
- web_build
@@ -46,32 +42,11 @@ steps:
- cd virtweb_backend
- mv /tmp/web_build/dist static
- cargo build --release
- cargo build --release --example api_curl
- ls -lah target/release/virtweb_backend target/release/examples/api_curl
- cp target/release/virtweb_backend target/release/examples/api_curl /tmp/release
- ls -lah target/release/virtweb_backend

- name: gitea_release
image: plugins/gitea-release
depends_on:
- backend_compile
when:
event:
- tag
volumes:
- name: release
path: /tmp/release
environment:
PLUGIN_API_KEY:
from_secret: API_KEY
settings:
base_url: https://gitea.communiquons.org
files: /tmp/release/*
checksum: sha512

volumes:
- name: rust_registry
temp: {}
- name: web_app
temp: {}
- name: release
temp: {}

@@ -1,3 +1,9 @@
{
"extends": ["local>renovate/presets"]
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"packageRules": [
{
"matchUpdateTypes": ["major", "minor", "patch"],
"automerge": true
}
]
}

virtweb_backend/Cargo.lock (2291 lines changed, generated)
File diff suppressed because it is too large.

@@ -1,49 +1,48 @@
[package]
name = "virtweb_backend"
version = "0.1.0"
edition = "2024"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
log = "0.4.27"
env_logger = "0.11.8"
clap = { version = "4.5.40", features = ["derive", "env"] }
light-openid = { version = "1.0.4", features = ["crypto-wrapper"] }
lazy_static = "1.5.0"
actix = "0.13.5"
actix-web = "4.11.0"
log = "0.4.21"
env_logger = "0.11.3"
clap = { version = "4.5.4", features = ["derive", "env"] }
light-openid = { version = "1.0.2", features = ["crypto-wrapper"] }
lazy_static = "1.4.0"
actix = "0.13.3"
actix-web = "4.5.1"
actix-remote-ip = "0.1.0"
actix-session = { version = "0.10.1", features = ["cookie-session"] }
actix-identity = "0.8.0"
actix-cors = "0.7.1"
actix-files = "0.6.6"
actix-ws = "0.3.0"
actix-http = "3.11.0"
serde = { version = "1.0.219", features = ["derive"] }
serde_json = "1.0.140"
serde_yml = "0.0.12"
quick-xml = { version = "0.37.5", features = ["serialize", "overlapped-lists"] }
futures-util = "0.3.31"
anyhow = "1.0.98"
actix-multipart = "0.7.2"
tempfile = "3.20.0"
reqwest = { version = "0.12.20", features = ["stream"] }
url = "2.5.4"
virt = "0.4.2"
sysinfo = { version = "0.35.1", features = ["serde"] }
uuid = { version = "1.16.0", features = ["v4", "serde"] }
lazy-regex = "3.4.1"
thiserror = "2.0.12"
image = "0.25.6"
rand = "0.9.1"
tokio = { version = "1.45.0", features = ["rt", "time", "macros"] }
futures = "0.3.31"
ipnetwork = { version = "0.21.1", features = ["serde"] }
num = "0.4.3"
rust-embed = { version = "8.7.2", features = ["mime-guess"] }
actix-session = { version = "0.9.0", features = ["cookie-session"] }
actix-identity = "0.7.1"
actix-cors = "0.7.0"
actix-files = "0.6.5"
actix-web-actors = "4.3.0"
actix-http = "3.6.0"
serde = { version = "1.0.199", features = ["derive"] }
serde_json = "1.0.116"
quick-xml = { version = "0.33.0", features = ["serialize", "overlapped-lists"] }
futures-util = "0.3.30"
anyhow = "1.0.82"
actix-multipart = "0.6.1"
tempfile = "3.10.1"
reqwest = { version = "0.12.4", features = ["stream"] }
url = "2.5.0"
virt = "0.3.1"
sysinfo = { version = "0.30.11", features = ["serde"] }
uuid = { version = "1.8.0", features = ["v4", "serde"] }
lazy-regex = "3.1.0"
thiserror = "1.0.59"
image = "0.25.1"
rand = "0.8.5"
bytes = "1.6.0"
tokio = "1.37.0"
futures = "0.3.30"
ipnetwork = "0.20.0"
num = "0.4.2"
rust-embed = { version = "8.3.0" }
mime_guess = "2.0.4"
dotenvy = "0.15.7"
nix = { version = "0.30.1", features = ["net"] }
basic-jwt = "0.3.0"
zip = "4.1.0"
chrono = "0.4.41"
nix = { version = "0.28.0", features = ["net"] }
basic-jwt = "0.2.0"

@@ -1,86 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 10.0, SVG Export Plug-In . SVG Version: 3.0.0 Build 77) -->
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" "http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd" [
<!ENTITY ns_flows "http://ns.adobe.com/Flows/1.0/">
<!ENTITY ns_extend "http://ns.adobe.com/Extensibility/1.0/">
<!ENTITY ns_ai "http://ns.adobe.com/AdobeIllustrator/10.0/">
<!ENTITY ns_graphs "http://ns.adobe.com/Graphs/1.0/">
<!ENTITY ns_vars "http://ns.adobe.com/Variables/1.0/">
<!ENTITY ns_imrep "http://ns.adobe.com/ImageReplacement/1.0/">
<!ENTITY ns_sfw "http://ns.adobe.com/SaveForWeb/1.0/">
<!ENTITY ns_custom "http://ns.adobe.com/GenericCustomNamespace/1.0/">
<!ENTITY ns_adobe_xpath "http://ns.adobe.com/XPath/1.0/">
<!ENTITY ns_svg "http://www.w3.org/2000/svg">
<!ENTITY ns_xlink "http://www.w3.org/1999/xlink">
]>
<svg
xmlns:x="&ns_extend;" xmlns:i="&ns_ai;" xmlns:graph="&ns_graphs;" i:viewOrigin="262 450" i:rulerOrigin="0 0" i:pageBounds="0 792 612 0"
xmlns="&ns_svg;" xmlns:xlink="&ns_xlink;" xmlns:a="http://ns.adobe.com/AdobeSVGViewerExtensions/3.0/"
width="87.041" height="108.445" viewBox="0 0 87.041 108.445" overflow="visible" enable-background="new 0 0 87.041 108.445"
xml:space="preserve">
<metadata>
<variableSets xmlns="&ns_vars;">
<variableSet varSetName="binding1" locked="none">
<variables></variables>
<v:sampleDataSets xmlns="&ns_custom;" xmlns:v="&ns_vars;"></v:sampleDataSets>
</variableSet>
</variableSets>
<sfw xmlns="&ns_sfw;">
<slices></slices>
<sliceSourceBounds y="341.555" x="262" width="87.041" height="108.445" bottomLeftOrigin="true"></sliceSourceBounds>
</sfw>
</metadata>
<g id="Layer_1" i:layer="yes" i:dimmedPercent="50" i:rgbTrio="#4F008000FFFF">
<g>
<path i:knockout="Off" fill="#A80030" d="M51.986,57.297c-1.797,0.025,0.34,0.926,2.686,1.287
c0.648-0.506,1.236-1.018,1.76-1.516C54.971,57.426,53.484,57.434,51.986,57.297"/>
<path i:knockout="Off" fill="#A80030" d="M61.631,54.893c1.07-1.477,1.85-3.094,2.125-4.766c-0.24,1.192-0.887,2.221-1.496,3.307
c-3.359,2.115-0.316-1.256-0.002-2.537C58.646,55.443,61.762,53.623,61.631,54.893"/>
<path i:knockout="Off" fill="#A80030" d="M65.191,45.629c0.217-3.236-0.637-2.213-0.924-0.978
C64.602,44.825,64.867,46.932,65.191,45.629"/>
<path i:knockout="Off" fill="#A80030" d="M45.172,1.399c0.959,0.172,2.072,0.304,1.916,0.533
C48.137,1.702,48.375,1.49,45.172,1.399"/>
<path i:knockout="Off" fill="#A80030" d="M47.088,1.932l-0.678,0.14l0.631-0.056L47.088,1.932"/>
<path i:knockout="Off" fill="#A80030" d="M76.992,46.856c0.107,2.906-0.85,4.316-1.713,6.812l-1.553,0.776
c-1.271,2.468,0.123,1.567-0.787,3.53c-1.984,1.764-6.021,5.52-7.313,5.863c-0.943-0.021,0.639-1.113,0.846-1.541
c-2.656,1.824-2.131,2.738-6.193,3.846l-0.119-0.264c-10.018,4.713-23.934-4.627-23.751-17.371
c-0.107,0.809-0.304,0.607-0.526,0.934c-0.517-6.557,3.028-13.143,9.007-15.832c5.848-2.895,12.704-1.707,16.893,2.197
c-2.301-3.014-6.881-6.209-12.309-5.91c-5.317,0.084-10.291,3.463-11.951,7.131c-2.724,1.715-3.04,6.611-4.227,7.507
C31.699,56.271,36.3,61.342,44.083,67.307c1.225,0.826,0.345,0.951,0.511,1.58c-2.586-1.211-4.954-3.039-6.901-5.277
c1.033,1.512,2.148,2.982,3.589,4.137c-2.438-0.826-5.695-5.908-6.646-6.115c4.203,7.525,17.052,13.197,23.78,10.383
c-3.113,0.115-7.068,0.064-10.566-1.229c-1.469-0.756-3.467-2.322-3.11-2.615c9.182,3.43,18.667,2.598,26.612-3.771
c2.021-1.574,4.229-4.252,4.867-4.289c-0.961,1.445,0.164,0.695-0.574,1.971c2.014-3.248-0.875-1.322,2.082-5.609l1.092,1.504
c-0.406-2.696,3.348-5.97,2.967-10.234c0.861-1.304,0.961,1.403,0.047,4.403c1.268-3.328,0.334-3.863,0.66-6.609
c0.352,0.923,0.814,1.904,1.051,2.878c-0.826-3.216,0.848-5.416,1.262-7.285c-0.408-0.181-1.275,1.422-1.473-2.377
c0.029-1.65,0.459-0.865,0.625-1.271c-0.324-0.186-1.174-1.451-1.691-3.877c0.375-0.57,1.002,1.478,1.512,1.562
c-0.328-1.929-0.893-3.4-0.916-4.88c-1.49-3.114-0.527,0.415-1.736-1.337c-1.586-4.947,1.316-1.148,1.512-3.396
c2.404,3.483,3.775,8.881,4.404,11.117c-0.48-2.726-1.256-5.367-2.203-7.922c0.73,0.307-1.176-5.609,0.949-1.691
c-2.27-8.352-9.715-16.156-16.564-19.818c0.838,0.767,1.896,1.73,1.516,1.881c-3.406-2.028-2.807-2.186-3.295-3.043
c-2.775-1.129-2.957,0.091-4.795,0.002c-5.23-2.774-6.238-2.479-11.051-4.217l0.219,1.023c-3.465-1.154-4.037,0.438-7.782,0.004
c-0.228-0.178,1.2-0.644,2.375-0.815c-3.35,0.442-3.193-0.66-6.471,0.122c0.808-0.567,1.662-0.942,2.524-1.424
c-2.732,0.166-6.522,1.59-5.352,0.295c-4.456,1.988-12.37,4.779-16.811,8.943l-0.14-0.933c-2.035,2.443-8.874,7.296-9.419,10.46
l-0.544,0.127c-1.059,1.793-1.744,3.825-2.584,5.67c-1.385,2.36-2.03,0.908-1.833,1.278c-2.724,5.523-4.077,10.164-5.246,13.97
c0.833,1.245,0.02,7.495,0.335,12.497c-1.368,24.704,17.338,48.69,37.785,54.228c2.997,1.072,7.454,1.031,11.245,1.141
c-4.473-1.279-5.051-0.678-9.408-2.197c-3.143-1.48-3.832-3.17-6.058-5.102l0.881,1.557c-4.366-1.545-2.539-1.912-6.091-3.037
l0.941-1.229c-1.415-0.107-3.748-2.385-4.386-3.646l-1.548,0.061c-1.86-2.295-2.851-3.949-2.779-5.23l-0.5,0.891
c-0.567-0.973-6.843-8.607-3.587-6.83c-0.605-0.553-1.409-0.9-2.281-2.484l0.663-0.758c-1.567-2.016-2.884-4.6-2.784-5.461
c0.836,1.129,1.416,1.34,1.99,1.533c-3.957-9.818-4.179-0.541-7.176-9.994l0.634-0.051c-0.486-0.732-0.781-1.527-1.172-2.307
l0.276-2.75C4.667,58.121,6.719,47.409,7.13,41.534c0.285-2.389,2.378-4.932,3.97-8.92l-0.97-0.167
c1.854-3.234,10.586-12.988,14.63-12.486c1.959-2.461-0.389-0.009-0.772-0.629c4.303-4.453,5.656-3.146,8.56-3.947
c3.132-1.859-2.688,0.725-1.203-0.709c5.414-1.383,3.837-3.144,10.9-3.846c0.745,0.424-1.729,0.655-2.35,1.205
c4.511-2.207,14.275-1.705,20.617,1.225c7.359,3.439,15.627,13.605,15.953,23.17l0.371,0.1
c-0.188,3.802,0.582,8.199-0.752,12.238L76.992,46.856"/>
<path i:knockout="Off" fill="#A80030" d="M32.372,59.764l-0.252,1.26c1.181,1.604,2.118,3.342,3.626,4.596
C34.661,63.502,33.855,62.627,32.372,59.764"/>
<path i:knockout="Off" fill="#A80030" d="M35.164,59.654c-0.625-0.691-0.995-1.523-1.409-2.352
c0.396,1.457,1.207,2.709,1.962,3.982L35.164,59.654"/>
<path i:knockout="Off" fill="#A80030" d="M84.568,48.916l-0.264,0.662c-0.484,3.438-1.529,6.84-3.131,9.994
C82.943,56.244,84.088,52.604,84.568,48.916"/>
<path i:knockout="Off" fill="#A80030" d="M45.527,0.537C46.742,0.092,48.514,0.293,49.803,0c-1.68,0.141-3.352,0.225-5.003,0.438
L45.527,0.537"/>
<path i:knockout="Off" fill="#A80030" d="M2.872,23.219c0.28,2.592-1.95,3.598,0.494,1.889
C4.676,22.157,2.854,24.293,2.872,23.219"/>
<path i:knockout="Off" fill="#A80030" d="M0,35.215c0.563-1.728,0.665-2.766,0.88-3.766C-0.676,33.438,0.164,33.862,0,35.215"/>
</g>
</g>
</svg>
Before: 6.7 KiB

Binary file not shown. (Before: 194 KiB)

@@ -1,8 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 100 100">
<circle fill="#f47421" cy="50" cx="50" r="45"/>
<circle fill="none" stroke="#ffffff" stroke-width="8.55" cx="50" cy="50" r="21.825"/>
<g id="friend"><circle fill="#f47421" cx="19.4" cy="50" r="8.4376"/>
<path stroke="#f47421" stroke-width="3.2378" d="M67,50H77"/>
<circle fill="#ffffff" cx="19.4" cy="50" r="6.00745"/></g>
<use xlink:href="#friend" transform="rotate(120,50,50)"/>
<use xlink:href="#friend" transform="rotate(240,50,50)"/></svg>
Before: 550 B

@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" height="88" width="88" xmlns:v="https://vecta.io/nano"><path d="M0 12.402l35.687-4.86.016 34.423-35.67.203zm35.67 33.529l.028 34.453L.028 75.48.026 45.7zm4.326-39.025L87.314 0v41.527l-47.318.376zm47.329 39.349l-.011 41.34-47.318-6.678-.066-34.739z" fill="#00adef"/></svg>
Before: 311 B

@@ -1,47 +0,0 @@
[
{
"name": "Ubuntu releases",
"url": "https://releases.ubuntu.com",
"image": "/assets/img/ubuntu.svg"
},
{
"name": "Old ubuntu releases",
"url": "https://old-releases.ubuntu.com/releases/",
"image": "/assets/img/ubuntu.svg"
},
{
"name": "Current Debian releases (amd64)",
"url": "https://cdimage.debian.org/mirror/cdimage/release/current/amd64/iso-dvd/",
"image": "/assets/img/debian.svg"
},
{
"name": "Old Debian releases",
"url": "https://cdimage.debian.org/mirror/cdimage/archive/",
"image": "/assets/img/debian.svg"
},
{
"name": "Latest stable Virtio driver",
"url": "https://fedorapeople.org/groups/virt/virtio-win/direct-downloads/stable-virtio/virtio-win.iso",
"image": "/assets/img/kvm.png"
},
{
"name": "Windows server 2025",
"url": "https://www.microsoft.com/en-us/evalcenter/download-windows-server-2025",
"image": "/assets/img/windows.svg"
},
{
"name": "Windows server 2022",
"url": "https://www.microsoft.com/en-us/evalcenter/download-windows-server-2022",
"image": "/assets/img/windows.svg"
},
{
"name": "Windows 11",
"url": "https://www.microsoft.com/en-us/software-download/windows11",
"image": "/assets/img/windows.svg"
},
{
"name": "Windows 11 Iot Enterprise LTSC 2024",
"url": "https://www.microsoft.com/en-us/evalcenter/download-windows-11-iot-enterprise-ltsc-eval",
"image": "/assets/img/windows.svg"
}
]

@@ -1,8 +1,9 @@
services:
oidc:
image: dexidp/dex
image: qlik/simple-oidc-provider
environment:
- REDIRECTS=http://localhost:3000/oidc_cb,http://localhost:5173/oidc_cb
- PORT=9001
ports:
- 9001:9001
volumes:
- ./docker/dex:/conf:ro
command: [ "dex", "serve", "/conf/dex.config.yaml" ]
- 9001:9001

@@ -1,27 +0,0 @@
issuer: http://127.0.0.1:9001/dex

storage:
type: memory

web:
http: 0.0.0.0:9001

oauth2:
# Automate some clicking
# Note: this might actually make some tests pass that otherwise wouldn't.
skipApprovalScreen: false

connectors:
# Note: this might actually make some tests pass that otherwise wouldn't.
- type: mockCallback
id: mock
name: Example

# Basic OP test suite requires two clients.
staticClients:
- id: foo
secret: bar
redirectURIs:
- http://localhost:3000/oidc_cb
- http://localhost:5173/oidc_cb
name: Project

@@ -55,13 +55,12 @@ fn main() {

let jwt = key.sign_jwt(&claims).expect("Failed to sign JWT!");

let err = Command::new("curl")
Command::new("curl")
.args(["-X", &args.verb])
.args(["-H", &format!("x-token-id: {}", args.token_id)])
.args(["-H", &format!("x-token-content: {jwt}")])
.args(args.run)
.arg(full_url)
.exec();

panic!("Failed to run cURL! {err}")
panic!("Failed to run curl!")
}

@@ -31,7 +31,7 @@ impl LibVirtActor {
"Will connect to hypvervisor at address '{}'",
hypervisor_uri
);
let conn = Connect::open(Some(hypervisor_uri))?;
let conn = Connect::open(hypervisor_uri)?;

Ok(Self { m: conn })
}
@@ -182,13 +182,6 @@ impl Handler<DeleteDomainReq> for LibVirtActor {
false => sys::VIR_DOMAIN_UNDEFINE_NVRAM,
})?;

// Delete associated cloud init disk
let cloud_init_disk = AppConfig::get().cloud_init_disk_path_for_vm(&domain_name);
if cloud_init_disk.exists() {
std::fs::remove_file(cloud_init_disk)?;
}

// If requested, delete block storage associated with the VM
if !msg.keep_files {
log::info!("Delete storage associated with the domain");
let path = AppConfig::get().vm_storage_path(msg.id);

@@ -1,3 +1,3 @@
pub mod libvirt_actor;
pub mod vnc_handler;
pub mod vnc_actor;
pub mod vnc_tokens_actor;

virtweb_backend/src/actors/vnc_actor.rs (209 lines, normal file)
@@ -0,0 +1,209 @@
use actix::{Actor, ActorContext, AsyncContext, Handler, StreamHandler};
use actix_http::ws::Item;
use actix_web_actors::ws;
use actix_web_actors::ws::Message;
use bytes::Bytes;
use image::EncodableLayout;
use std::path::Path;
use std::time::{Duration, Instant};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::unix::{OwnedReadHalf, OwnedWriteHalf};
use tokio::net::UnixStream;

/// How often heartbeat pings are sent
const HEARTBEAT_INTERVAL: Duration = Duration::from_secs(5);

/// How long before lack of client response causes a timeout
const CLIENT_TIMEOUT: Duration = Duration::from_secs(20);

#[derive(thiserror::Error, Debug)]
enum VNCError {
#[error("Socket file does not exists!")]
SocketDoesNotExists,
}

pub struct VNCActor {
/// Qemu -> WS
read_half: Option<OwnedReadHalf>,

/// WS -> Qemu
write_half: OwnedWriteHalf,

// Client must respond to ping at a specific interval, otherwise we drop connection
hb: Instant,
}

impl VNCActor {
pub async fn new(socket_path: &str) -> anyhow::Result<Self> {
let socket_path = Path::new(socket_path);

if !socket_path.exists() {
return Err(VNCError::SocketDoesNotExists.into());
}

let socket = UnixStream::connect(socket_path).await?;
let (read_half, write_half) = socket.into_split();

Ok(Self {
read_half: Some(read_half),
write_half,
hb: Instant::now(),
})
}

/// helper method that sends ping to client every second.
///
/// also this method checks heartbeats from client
fn hb(&self, ctx: &mut ws::WebsocketContext<Self>) {
ctx.run_interval(HEARTBEAT_INTERVAL, |act, ctx| {
// check client heartbeats
if Instant::now().duration_since(act.hb) > CLIENT_TIMEOUT {
// heartbeat timed out
log::warn!("WebSocket Client heartbeat failed, disconnecting!");
ctx.stop();
return;
}

ctx.ping(b"");
});
}

fn send_to_socket(&mut self, bytes: Bytes, ctx: &mut ws::WebsocketContext<Self>) {
log::trace!("Received {} bytes for VNC socket", bytes.len());

if let Err(e) = futures::executor::block_on(self.write_half.write(bytes.as_bytes())) {
log::error!("Failed to relay bytes to VNC socket {e}");
ctx.close(None);
}
}

fn start_qemu_to_ws_end(&mut self, ctx: &mut ws::WebsocketContext<Self>) {
let mut read_half = self.read_half.take().unwrap();
let addr = ctx.address();
let future = async move {
let mut buff: [u8; 5000] = [0; 5000];
loop {
match read_half.read(&mut buff).await {
Ok(mut l) => {
if l == 0 {
log::warn!("Got empty read!");

// Ugly hack made to wait for next byte
let mut one_byte_buff: [u8; 1] = [0; 1];
match read_half.read_exact(&mut one_byte_buff).await {
Ok(b) => {
if b == 0 {
log::error!("Did not get a byte !");
let _ = addr.send(CloseWebSocketReq).await;
break;
}
buff[0] = one_byte_buff[0];
l = 1;
}
Err(e) => {
log::error!("Failed to read 1 BYTE from remote socket. Stopping now... {:?}", e);
break;
}
}
}

let to_send = SendBytesReq(Vec::from(&buff[0..l]));
if let Err(e) = addr.send(to_send).await {
log::error!("Failed to send to websocket. Stopping now... {:?}", e);
return;
}
}
Err(e) => {
log::error!("Failed to read from remote socket. Stopping now... {:?}", e);
break;
}
};
}

log::info!("Exited read loop");
};

tokio::spawn(future);
}
}

impl Actor for VNCActor {
type Context = ws::WebsocketContext<Self>;

fn started(&mut self, ctx: &mut Self::Context) {
self.hb(ctx);
self.start_qemu_to_ws_end(ctx);
}
}

impl StreamHandler<Result<Message, ws::ProtocolError>> for VNCActor {
fn handle(&mut self, msg: Result<Message, ws::ProtocolError>, ctx: &mut Self::Context) {
match msg {
Ok(Message::Ping(msg)) => ctx.pong(&msg),

Ok(Message::Text(_text)) => {
log::error!("Received unexpected text on VNC WebSocket!");
}

Ok(Message::Binary(bin)) => {
log::info!("Forward {} bytes to VNC server", bin.len());
self.send_to_socket(bin, ctx);
}

Ok(Message::Continuation(msg)) => match msg {
Item::FirstText(_) => {
log::error!("Received unexpected split text!");
ctx.close(None);
}
Item::FirstBinary(bin) | Item::Continue(bin) | Item::Last(bin) => {
self.send_to_socket(bin, ctx);
}
},

Ok(Message::Pong(_)) => {
log::trace!("Received PONG message");
self.hb = Instant::now();
}

Ok(Message::Close(r)) => {
log::info!("WebSocket closed. Reason={r:?}");
ctx.close(r);
}

Ok(Message::Nop) => {
log::debug!("Received Nop message")
}

Err(e) => {
log::error!("WebSocket protocol error! {e}");
ctx.close(None)
}
}
}
}

#[derive(actix::Message)]
#[rtype(result = "()")]
pub struct SendBytesReq(Vec<u8>);

impl Handler<SendBytesReq> for VNCActor {
type Result = ();

fn handle(&mut self, msg: SendBytesReq, ctx: &mut Self::Context) -> Self::Result {
log::trace!("Send {} bytes to WS", msg.0.len());
ctx.binary(msg.0);
}
}

#[derive(actix::Message)]
#[rtype(result = "()")]
pub struct CloseWebSocketReq;

impl Handler<CloseWebSocketReq> for VNCActor {
type Result = ();

fn handle(&mut self, _msg: CloseWebSocketReq, ctx: &mut Self::Context) -> Self::Result {
log::trace!("Close websocket, because VNC socket has terminated");
ctx.close(None);
}
}

@@ -1,129 +0,0 @@
use actix_http::ws::Message;
use futures_util::StreamExt as _;
use std::time::{Duration, Instant};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::UnixStream;
use tokio::select;
use tokio::time::interval;

/// How often heartbeat pings are sent
const HEARTBEAT_INTERVAL: Duration = Duration::from_secs(5);

/// How long before lack of client response causes a timeout
const CLIENT_TIMEOUT: Duration = Duration::from_secs(20);

/// Broadcast text & binary messages received from a client, respond to ping messages, and monitor
/// connection health to detect network issues and free up resources.
pub async fn handle(
mut session: actix_ws::Session,
mut msg_stream: actix_ws::MessageStream,
mut socket: UnixStream,
) {
log::info!("Connected to websocket");

let mut last_heartbeat = Instant::now();
let mut interval = interval(HEARTBEAT_INTERVAL);

let mut buf_socket = [0u8; 1024];

let reason = loop {
// waits for either `msg_stream` to receive a message from the client, the broadcast channel
// to send a message, or the heartbeat interval timer to tick, yielding the value of
// whichever one is ready first
select! {

// heartbeat interval ticked
_tick = interval.tick() => {
// if no heartbeat ping/pong received recently, close the connection
if Instant::now().duration_since(last_heartbeat) > CLIENT_TIMEOUT {
log::info!(
"client has not sent heartbeat in over {CLIENT_TIMEOUT:?}; disconnecting"
);

break None;
}

// send heartbeat ping
let _ = session.ping(b"").await;
}

msg = msg_stream.next() => {
let msg = match msg {
// received message from WebSocket client
Some(Ok(msg)) => msg,

// client WebSocket stream error
Some(Err(err)) => {
log::error!("{err}");
break None;
}

// client WebSocket stream ended
None => break None
};

log::debug!("msg: {msg:?}");

match msg {
Message::Text(_) => {
log::error!("Received unexpected text on VNC WebSocket!");
}

Message::Binary(bin) => {
log::info!("Forward {} bytes to VNC server", bin.len());
if let Err(e) = socket.write(&bin).await {
log::error!("Failed to relay bytes to VNC socket {e}");
break None;
}
}

Message::Close(reason) => {
break reason;
}

Message::Ping(bytes) => {
last_heartbeat = Instant::now();
let _ = session.pong(&bytes).await;
}

Message::Pong(_) => {
last_heartbeat = Instant::now();
}

Message::Continuation(_) => {
log::warn!("no support for continuation frames");
}

// no-op; ignore
Message::Nop => {}
};
}

// Forward socket packet to WS client
count = socket.read(&mut buf_socket) => {
let count = match count {
Ok(count) => count,
Err(e) => {
log::error!("[VNC] Failed to read from upstream! {e}");
break None;
}
};

if count == 0 {
log::warn!("[VNC] infinite loop (upstream), closing connection");
break None;
}

if let Err(e)=session.binary(buf_socket[0..count].to_vec()).await{
log::error!("[VNC] Failed to forward messages to upstream, will close connection! {e}");
break None
}
}
}
};

// attempt to close connection gracefully
let _ = session.close(reason).await;

log::info!("Disconnected from websocket");
}

@@ -68,7 +68,7 @@ pub struct AppConfig {
#[arg(
long,
env,
default_value = "http://localhost:9001/dex/.well-known/openid-configuration"
default_value = "http://localhost:9001/.well-known/openid-configuration"
)]
pub oidc_configuration_url: String,

@@ -245,34 +245,11 @@ impl AppConfig {
storage_path.canonicalize().unwrap()
}

/// Get iso files storage directory
/// Get iso storage directory
pub fn iso_storage_path(&self) -> PathBuf {
self.storage_path().join("iso")
}

/// Get the path where generated cloud init disk image are stored
pub fn cloud_init_disk_storage_path(&self) -> PathBuf {
self.storage_path().join("cloud_init_disks")
}

/// Get the path where the disk image of a VM is stored
pub fn cloud_init_disk_path_for_vm(&self, name: &str) -> PathBuf {
self.cloud_init_disk_storage_path().join(format!(
"{}-{name}.iso",
constants::CLOUD_INIT_IMAGE_PREFIX_NAME
))
}

/// Get disk images storage directory
pub fn disk_images_storage_path(&self) -> PathBuf {
self.storage_path().join("disk_images")
}

/// Get the path of a disk image file
pub fn disk_images_file_path(&self, name: &str) -> PathBuf {
self.disk_images_storage_path().join(name)
}

/// Get VM vnc sockets directory
pub fn vnc_sockets_path(&self) -> PathBuf {
self.storage_path().join("vnc")
@@ -283,17 +260,15 @@ impl AppConfig {
self.vnc_sockets_path().join(format!("vnc-{}", name))
}

/// Get VM root disks storage directory
pub fn root_vm_disks_storage_path(&self) -> PathBuf {
/// Get VM vnc sockets directory
pub fn disks_storage_path(&self) -> PathBuf {
self.storage_path().join("disks")
}

/// Get specific VM disk storage directory
pub fn vm_storage_path(&self, id: XMLUuid) -> PathBuf {
self.root_vm_disks_storage_path().join(id.as_string())
self.disks_storage_path().join(id.as_string())
}

/// Get the path were VM definitions are backed up
pub fn definitions_path(&self) -> PathBuf {
self.storage_path().join("definitions")
}

@@ -1,5 +1,3 @@
use crate::utils::file_size_utils::FileSize;

/// Name of the cookie that contains session information
pub const SESSION_COOKIE_NAME: &str = "X-auth-token";

@@ -19,32 +17,20 @@ pub const ROUTES_WITHOUT_AUTH: [&str; 5] = [
];

/// Allowed ISO mimetypes
pub const ALLOWED_ISO_MIME_TYPES: [&str; 4] = [
pub const ALLOWED_ISO_MIME_TYPES: [&str; 3] = [
"application/x-cd-image",
"application/x-iso9660-image",
"application/octet-stream",
"application/vnd.efi.iso",
];

/// ISO max size
pub const ISO_MAX_SIZE: FileSize = FileSize::from_gb(10);
pub const ISO_MAX_SIZE: usize = 10 * 1000 * 1000 * 1000;

/// Allowed uploaded disk images formats
pub const ALLOWED_DISK_IMAGES_MIME_TYPES: [&str; 4] = [
"application/x-qemu-disk",
"application/x-raw-disk-image",
"application/gzip",
"application/octet-stream",
];
/// Min VM memory size (MB)
pub const MIN_VM_MEMORY: usize = 100;

/// Disk image max size
pub const DISK_IMAGE_MAX_SIZE: FileSize = FileSize::from_gb(10 * 1000);

/// Min VM memory size
pub const MIN_VM_MEMORY: FileSize = FileSize::from_mb(100);

/// Max VM memory size
pub const MAX_VM_MEMORY: FileSize = FileSize::from_gb(64);
/// Max VM memory size (MB)
pub const MAX_VM_MEMORY: usize = 64000;

/// Disk name min length
pub const DISK_NAME_MIN_LEN: usize = 2;
@@ -52,14 +38,11 @@ pub const DISK_NAME_MIN_LEN: usize = 2;
/// Disk name max length
pub const DISK_NAME_MAX_LEN: usize = 10;

/// Disk size min (B)
pub const DISK_SIZE_MIN: FileSize = FileSize::from_mb(50);
/// Disk size min (MB)
pub const DISK_SIZE_MIN: usize = 100;

/// Disk size max (B)
pub const DISK_SIZE_MAX: FileSize = FileSize::from_gb(20000);

/// Cloud init generated disk image prefix
pub const CLOUD_INIT_IMAGE_PREFIX_NAME: &str = "virtweb-cloudinit-autogen-image";
/// Disk size max (MB)
pub const DISK_SIZE_MAX: usize = 1000 * 1000 * 2;

/// Net nat entry comment max size
pub const NET_NAT_COMMENT_MAX_SIZE: usize = 250;
@@ -124,27 +107,3 @@ pub const API_TOKEN_DESCRIPTION_MAX_LENGTH: usize = 30;

/// API token right path max length
pub const API_TOKEN_RIGHT_PATH_MAX_LENGTH: usize = 255;

/// Qemu image program path
pub const PROGRAM_QEMU_IMAGE: &str = "/usr/bin/qemu-img";

/// IP program path
pub const PROGRAM_IP: &str = "/usr/sbin/ip";

/// Copy program path
pub const PROGRAM_COPY: &str = "/bin/cp";

/// Gzip program path
pub const PROGRAM_GZIP: &str = "/usr/bin/gzip";

/// XZ program path
pub const PROGRAM_XZ: &str = "/usr/bin/xz";

/// Bash program
pub const PROGRAM_BASH: &str = "/usr/bin/bash";

/// DD program
pub const PROGRAM_DD: &str = "/usr/bin/dd";

/// cloud-localds program
pub const PROGRAM_CLOUD_LOCALDS: &str = "/usr/bin/cloud-localds";

@@ -2,9 +2,9 @@

use crate::api_tokens;
use crate::api_tokens::{NewToken, TokenID, TokenRights};
use crate::controllers::HttpResult;
use crate::controllers::api_tokens_controller::rest_token::RestToken;
use actix_web::{HttpResponse, web};
use crate::controllers::HttpResult;
use actix_web::{web, HttpResponse};
use basic_jwt::JWTPrivateKey;

/// Create a special module for REST token to enforce usage of constructor function

@@ -1,6 +1,6 @@
use actix_remote_ip::RemoteIP;
use actix_web::web::Data;
use actix_web::{HttpResponse, Responder, web};
use actix_web::{web, HttpResponse, Responder};
use light_openid::basic_state_manager::BasicStateManager;

use crate::app_config::AppConfig;

@@ -1,255 +0,0 @@
use crate::app_config::AppConfig;
use crate::constants;
use crate::controllers::{HttpResult, LibVirtReq};
use crate::libvirt_lib_structures::XMLUuid;
use crate::libvirt_rest_structures::vm::VMInfo;
use crate::utils::file_disks_utils::{DiskFileFormat, DiskFileInfo};
use crate::utils::files_utils;
use actix_files::NamedFile;
use actix_multipart::form::MultipartForm;
use actix_multipart::form::tempfile::TempFile;
use actix_web::{HttpRequest, HttpResponse, web};

#[derive(Debug, MultipartForm)]
pub struct UploadDiskImageForm {
#[multipart(rename = "file")]
files: Vec<TempFile>,
}

/// Upload disk image file
pub async fn upload(MultipartForm(mut form): MultipartForm<UploadDiskImageForm>) -> HttpResult {
if form.files.is_empty() {
log::error!("Missing uploaded disk file!");
return Ok(HttpResponse::BadRequest().json("Missing file!"));
}

let file = form.files.remove(0);

// Check uploaded file size
if file.size > constants::DISK_IMAGE_MAX_SIZE.as_bytes() {
return Ok(HttpResponse::BadRequest().json("Disk image max size exceeded!"));
}

// Check file mime type
if let Some(mime_type) = file.content_type {
if !constants::ALLOWED_DISK_IMAGES_MIME_TYPES.contains(&mime_type.as_ref()) {
return Ok(HttpResponse::BadRequest().json(format!(
"Unsupported file type for disk upload: {}",
mime_type
)));
}
}

// Extract and check file name
let Some(file_name) = file.file_name else {
return Ok(HttpResponse::BadRequest().json("Missing file name of uploaded file!"));
};
if !files_utils::check_file_name(&file_name) {
return Ok(HttpResponse::BadRequest().json("Invalid uploaded file name!"));
}

// Check if a file with the same name already exists
let dest_path = AppConfig::get().disk_images_file_path(&file_name);
if dest_path.is_file() {
return Ok(HttpResponse::Conflict().json("A file with the same name already exists!"));
}

// Copy the file to the destination
file.file.persist(&dest_path)?;

// Check if file information can be loaded
if let Err(e) = DiskFileInfo::load_file(&dest_path) {
log::error!("Failed to get information about uploaded disk file! {e}");
std::fs::remove_file(&dest_path)?;
return Ok(HttpResponse::InternalServerError()
.json(format!("Unable to process uploaded file! {e}")));
}

Ok(HttpResponse::Ok().json("Successfully uploaded disk image!"))
}

/// Get disk images list
pub async fn get_list() -> HttpResult {
let mut list = vec![];
for entry in AppConfig::get().disk_images_storage_path().read_dir()? {
let entry = entry?;
list.push(DiskFileInfo::load_file(&entry.path())?);
}

Ok(HttpResponse::Ok().json(list))
}

#[derive(serde::Deserialize)]
pub struct DiskFilePath {
filename: String,
}

/// Download disk image
pub async fn download(p: web::Path<DiskFilePath>, req: HttpRequest) -> HttpResult {
if !files_utils::check_file_name(&p.filename) {
return Ok(HttpResponse::BadRequest().json("Invalid file name!"));
}

let file_path = AppConfig::get().disk_images_file_path(&p.filename);

if !file_path.exists() {
return Ok(HttpResponse::NotFound().json("Disk image does not exists!"));
}

Ok(NamedFile::open(file_path)?.into_response(&req))
}

#[derive(serde::Deserialize)]
pub struct ConvertDiskImageRequest {
dest_file_name: String,
#[serde(flatten)]
format: DiskFileFormat,
}

/// Convert disk image into a new format
pub async fn convert(
p: web::Path<DiskFilePath>,
req: web::Json<ConvertDiskImageRequest>,
) -> HttpResult {
if !files_utils::check_file_name(&p.filename) {
return Ok(HttpResponse::BadRequest().json("Invalid src file name!"));
}

let src_file_path = AppConfig::get().disk_images_file_path(&p.filename);

let src = DiskFileInfo::load_file(&src_file_path)?;

handle_convert_request(src, &req).await
}

#[derive(serde::Deserialize)]
pub struct BackupVMDiskPath {
uid: XMLUuid,
diskid: String,
}

/// Perform disk backup
pub async fn backup_disk(
client: LibVirtReq,
path: web::Path<BackupVMDiskPath>,
req: web::Json<ConvertDiskImageRequest>,
) -> HttpResult {
// Get the VM information
let info = match client.get_single_domain(path.uid).await {
Ok(i) => i,
Err(e) => {
log::error!("Failed to get domain info! {e}");
return Ok(HttpResponse::InternalServerError().json(e.to_string()));
}
};

let vm = VMInfo::from_domain(info)?;

// Load disk information
let Some(disk) = vm
.file_disks
.into_iter()
.find(|disk| disk.name == path.diskid)
else {
return Ok(HttpResponse::NotFound()
.json(format!("Disk {} not found for vm {}", path.diskid, vm.name)));
};

let src_path = disk.disk_path(vm.uuid.expect("Missing VM uuid!"));
let src_disk = DiskFileInfo::load_file(&src_path)?;

// Perform conversion
handle_convert_request(src_disk, &req).await
}

/// Generic controller code that performs image conversion to create a disk image file
pub async fn handle_convert_request(
src: DiskFileInfo,
req: &ConvertDiskImageRequest,
) -> HttpResult {
// Check destination file
if !files_utils::check_file_name(&req.dest_file_name) {
return Ok(HttpResponse::BadRequest().json("Invalid destination file name!"));
}
if !req
.format
.ext()
.iter()
.any(|e| req.dest_file_name.ends_with(e))
{
return Ok(HttpResponse::BadRequest().json("Invalid destination file extension!"));
}

let dst_file_path = AppConfig::get().disk_images_file_path(&req.dest_file_name);

if dst_file_path.exists() {
return Ok(HttpResponse::Conflict().json("Specified destination file already exists!"));
}

// Perform conversion
if let Err(e) = src.convert(&dst_file_path, req.format) {
log::error!("Disk file conversion error: {e}");
return Ok(
HttpResponse::InternalServerError().json(format!("Disk file conversion error: {e}"))
);
}

Ok(HttpResponse::Accepted().json("Successfully converted disk file"))
}

#[derive(serde::Deserialize)]
pub struct RenameDiskImageRequest {
name: String,
}

/// Rename disk image
pub async fn rename(
p: web::Path<DiskFilePath>,
req: web::Json<RenameDiskImageRequest>,
) -> HttpResult {
// Check source
if !files_utils::check_file_name(&p.filename) {
return Ok(HttpResponse::BadRequest().json("Invalid src file name!"));
}
let src_path = AppConfig::get().disk_images_file_path(&p.filename);
if !src_path.exists() {
return Ok(HttpResponse::NotFound().json("Disk image does not exists!"));
}

// Check destination
if !files_utils::check_file_name(&req.name) {
return Ok(HttpResponse::BadRequest().json("Invalid dst file name!"));
}
let dst_path = AppConfig::get().disk_images_file_path(&req.name);
if dst_path.exists() {
return Ok(HttpResponse::Conflict().json("Destination name already exists!"));
}

// Check extension
let disk = DiskFileInfo::load_file(&src_path)?;
if !disk.format.ext().iter().any(|e| req.name.ends_with(e)) {
return Ok(HttpResponse::BadRequest().json("Invalid destination file extension!"));
}

// Perform rename
std::fs::rename(&src_path, &dst_path)?;

Ok(HttpResponse::Accepted().finish())
}

/// Delete a disk image
pub async fn delete(p: web::Path<DiskFilePath>) -> HttpResult {
if !files_utils::check_file_name(&p.filename) {
return Ok(HttpResponse::BadRequest().json("Invalid file name!"));
}

let file_path = AppConfig::get().disk_images_file_path(&p.filename);

if !file_path.exists() {
return Ok(HttpResponse::NotFound().json("Disk image does not exists!"));
}

std::fs::remove_file(file_path)?;

Ok(HttpResponse::Accepted().finish())
}

@@ -1,148 +0,0 @@
use crate::controllers::{HttpResult, LibVirtReq};
use crate::extractors::group_vm_id_extractor::GroupVmIdExtractor;
use crate::libvirt_rest_structures::vm::VMInfo;
use actix_web::HttpResponse;
use std::collections::HashMap;

/// Get the list of groups
pub async fn list(client: LibVirtReq) -> HttpResult {
let groups = match client.get_full_groups_list().await {
Err(e) => {
log::error!("Failed to get the list of groups! {e}");
return Ok(HttpResponse::InternalServerError()
.json(format!("Failed to get the list of groups! {e}")));
}
Ok(l) => l,
};

Ok(HttpResponse::Ok().json(groups))
}

/// Get information about the VMs of a group
pub async fn vm_info(vms_xml: GroupVmIdExtractor) -> HttpResult {
let mut vms = Vec::new();
for vm in vms_xml.0 {
vms.push(VMInfo::from_domain(vm)?)
}
Ok(HttpResponse::Ok().json(vms))
}

#[derive(Default, serde::Serialize)]
pub struct TreatmentResult {
ok: usize,
failed: usize,
}

/// Start the VMs of a group
pub async fn vm_start(client: LibVirtReq, vms: GroupVmIdExtractor) -> HttpResult {
let mut res = TreatmentResult::default();
for vm in vms.0 {
if let Some(uuid) = vm.uuid {
match client.start_domain(uuid).await {
Ok(_) => res.ok += 1,
Err(_) => res.failed += 1,
}
}
}
Ok(HttpResponse::Ok().json(res))
}

/// Shutdown the VMs of a group
pub async fn vm_shutdown(client: LibVirtReq, vms: GroupVmIdExtractor) -> HttpResult {
let mut res = TreatmentResult::default();
for vm in vms.0 {
if let Some(uuid) = vm.uuid {
match client.shutdown_domain(uuid).await {
Ok(_) => res.ok += 1,
Err(_) => res.failed += 1,
}
}
}
Ok(HttpResponse::Ok().json(res))
}

/// Suspend the VMs of a group
pub async fn vm_suspend(client: LibVirtReq, vms: GroupVmIdExtractor) -> HttpResult {
let mut res = TreatmentResult::default();
for vm in vms.0 {
if let Some(uuid) = vm.uuid {
match client.suspend_domain(uuid).await {
Ok(_) => res.ok += 1,
Err(_) => res.failed += 1,
}
}
}
Ok(HttpResponse::Ok().json(res))
}

/// Resume the VMs of a group
pub async fn vm_resume(client: LibVirtReq, vms: GroupVmIdExtractor) -> HttpResult {
let mut res = TreatmentResult::default();
for vm in vms.0 {
if let Some(uuid) = vm.uuid {
match client.resume_domain(uuid).await {
Ok(_) => res.ok += 1,
Err(_) => res.failed += 1,
}
}
}
Ok(HttpResponse::Ok().json(res))
}

/// Kill the VMs of a group
pub async fn vm_kill(client: LibVirtReq, vms: GroupVmIdExtractor) -> HttpResult {
let mut res = TreatmentResult::default();
for vm in vms.0 {
if let Some(uuid) = vm.uuid {
match client.kill_domain(uuid).await {
Ok(_) => res.ok += 1,
Err(_) => res.failed += 1,
}
}
}
Ok(HttpResponse::Ok().json(res))
}

/// Reset the VMs of a group
pub async fn vm_reset(client: LibVirtReq, vms: GroupVmIdExtractor) -> HttpResult {
let mut res = TreatmentResult::default();
for vm in vms.0 {
if let Some(uuid) = vm.uuid {
match client.reset_domain(uuid).await {
Ok(_) => res.ok += 1,
Err(_) => res.failed += 1,
}
}
}
Ok(HttpResponse::Ok().json(res))
}

/// Get the screenshot of the VMs of a group
pub async fn vm_screenshot(client: LibVirtReq, vms: GroupVmIdExtractor) -> HttpResult {
if vms.0.is_empty() {
return Ok(HttpResponse::NoContent().finish());
}

let image = if vms.0.len() == 1 {
client.screenshot_domain(vms.0[0].uuid.unwrap()).await?
} else {
return Ok(
HttpResponse::UnprocessableEntity().json("Cannot return multiple VM screenshots!!")
);
};

Ok(HttpResponse::Ok().content_type("image/png").body(image))
}

/// Get the state of the VMs
pub async fn vm_state(client: LibVirtReq, vms: GroupVmIdExtractor) -> HttpResult {
let mut states = HashMap::new();

for vm in vms.0 {
if let Some(uuid) = vm.uuid {
states.insert(uuid, client.get_domain_state(uuid).await?);
}
}

Ok(HttpResponse::Ok().json(states))
}

@@ -3,9 +3,9 @@ use crate::constants;
use crate::controllers::HttpResult;
use crate::utils::files_utils;
use actix_files::NamedFile;
use actix_multipart::form::MultipartForm;
use actix_multipart::form::tempfile::TempFile;
use actix_web::{HttpRequest, HttpResponse, web};
use actix_multipart::form::MultipartForm;
use actix_web::{web, HttpRequest, HttpResponse};
use futures_util::StreamExt;
use std::fs::File;
use std::io::Write;
@@ -26,7 +26,7 @@ pub async fn upload_file(MultipartForm(mut form): MultipartForm<UploadIsoForm>)

let file = form.files.remove(0);

if file.size > constants::ISO_MAX_SIZE.as_bytes() {
if file.size > constants::ISO_MAX_SIZE {
log::error!("Uploaded ISO file is too large!");
return Ok(HttpResponse::BadRequest().json("File is too large!"));
}
@@ -88,7 +88,7 @@ pub async fn upload_from_url(req: web::Json<DownloadFromURLReq>) -> HttpResult {
let response = reqwest::get(&req.url).await?;

if let Some(len) = response.content_length() {
if len > constants::ISO_MAX_SIZE.as_bytes() as u64 {
if len > constants::ISO_MAX_SIZE as u64 {
return Ok(HttpResponse::BadRequest().json("File is too large!"));
}
}
@@ -132,12 +132,12 @@ pub async fn get_list() -> HttpResult {
}

#[derive(serde::Deserialize)]
pub struct IsoFilePath {
pub struct DownloadFilePath {
filename: String,
}

/// Download ISO file
pub async fn download_file(p: web::Path<IsoFilePath>, req: HttpRequest) -> HttpResult {
pub async fn download_file(p: web::Path<DownloadFilePath>, req: HttpRequest) -> HttpResult {
if !files_utils::check_file_name(&p.filename) {
return Ok(HttpResponse::BadRequest().json("Invalid file name!"));
}
@@ -152,7 +152,7 @@ pub async fn download_file(p: web::Path<IsoFilePath>, req: HttpRequest) -> HttpR
}

/// Delete ISO file
pub async fn delete_file(p: web::Path<IsoFilePath>) -> HttpResult {
pub async fn delete_file(p: web::Path<DownloadFilePath>) -> HttpResult {
if !files_utils::check_file_name(&p.filename) {
return Ok(HttpResponse::BadRequest().json("Invalid file name!"));
}

@@ -1,15 +1,13 @@
use crate::libvirt_client::LibVirtClient;
use actix_http::StatusCode;
use actix_web::body::BoxBody;
use actix_web::{HttpResponse, web};
use actix_web::{web, HttpResponse};
use std::error::Error;
use std::fmt::{Display, Formatter};
use zip::result::ZipError;
use std::io::ErrorKind;

pub mod api_tokens_controller;
pub mod auth_controller;
pub mod disk_images_controller;
pub mod groups_controller;
pub mod iso_controller;
pub mod network_controller;
pub mod nwfilter_controller;
@@ -63,7 +61,7 @@ impl From<serde_json::Error> for HttpErr {

impl From<Box<dyn Error>> for HttpErr {
fn from(value: Box<dyn Error>) -> Self {
HttpErr::Err(std::io::Error::other(value.to_string()).into())
HttpErr::Err(std::io::Error::new(ErrorKind::Other, value.to_string()).into())
}
}

@@ -99,13 +97,7 @@ impl From<reqwest::header::ToStrError> for HttpErr {

impl From<actix_web::Error> for HttpErr {
fn from(value: actix_web::Error) -> Self {
HttpErr::Err(std::io::Error::other(value.to_string()).into())
}
}

impl From<ZipError> for HttpErr {
fn from(value: ZipError) -> Self {
HttpErr::Err(std::io::Error::other(value.to_string()).into())
HttpErr::Err(std::io::Error::new(ErrorKind::Other, value.to_string()).into())
}
}

@@ -1,7 +1,7 @@
use crate::controllers::{HttpResult, LibVirtReq};
use crate::libvirt_lib_structures::XMLUuid;
use crate::libvirt_rest_structures::net::NetworkInfo;
use actix_web::{HttpResponse, web};
use actix_web::{web, HttpResponse};

#[derive(serde::Serialize, serde::Deserialize)]
pub struct NetworkID {

@@ -2,7 +2,7 @@ use crate::constants;
use crate::controllers::{HttpResult, LibVirtReq};
use crate::libvirt_lib_structures::XMLUuid;
use crate::libvirt_rest_structures::nw_filter::NetworkFilter;
use actix_web::{HttpResponse, web};
use actix_web::{web, HttpResponse};

#[derive(serde::Serialize, serde::Deserialize)]
pub struct NetworkFilterID {

@ -1,24 +1,14 @@
use crate::actors::vnc_tokens_actor::VNC_TOKEN_LIFETIME;
use crate::app_config::AppConfig;
use crate::constants;
use crate::constants::{DISK_NAME_MAX_LEN, DISK_NAME_MIN_LEN, DISK_SIZE_MAX, DISK_SIZE_MIN};
use crate::controllers::{HttpResult, LibVirtReq};
use crate::extractors::local_auth_extractor::LocalAuthEnabled;
use crate::libvirt_rest_structures::hypervisor::HypervisorInfo;
use crate::libvirt_rest_structures::net::NetworkInfo;
use crate::libvirt_rest_structures::nw_filter::NetworkFilter;
use crate::libvirt_rest_structures::vm::VMInfo;
use crate::nat::nat_hook;
use crate::utils::net_utils;
use crate::utils::time_utils::{format_date, time};
use crate::{api_tokens, constants};
use actix_files::NamedFile;
use actix_web::{HttpRequest, HttpResponse, Responder};
use serde::Serialize;
use std::fs::File;
use std::io::Write;
use actix_web::{HttpResponse, Responder};
use sysinfo::{Components, Disks, Networks, System};
use zip::ZipWriter;
use zip::write::SimpleFileOptions;

#[derive(serde::Serialize)]
struct StaticConfig {
@ -26,7 +16,6 @@ struct StaticConfig {
local_auth_enabled: bool,
oidc_auth_enabled: bool,
iso_mimetypes: &'static [&'static str],
disk_images_mimetypes: &'static [&'static str],
net_mac_prefix: &'static str,
builtin_nwfilter_rules: &'static [&'static str],
nwfilter_chains: &'static [&'static str],
@ -48,15 +37,12 @@ struct SLenConstraints {
#[derive(serde::Serialize)]
struct ServerConstraints {
iso_max_size: usize,
disk_image_max_size: usize,
vnc_token_duration: u64,
vm_name_size: LenConstraints,
vm_title_size: LenConstraints,
group_id_size: LenConstraints,
memory_size: LenConstraints,
disk_name_size: LenConstraints,
disk_size: LenConstraints,
disk_image_name_size: LenConstraints,
net_name_size: LenConstraints,
net_title_size: LenConstraints,
net_nat_comment_size: LenConstraints,
@ -76,34 +62,29 @@ pub async fn static_config(local_auth: LocalAuthEnabled) -> impl Responder {
local_auth_enabled: *local_auth,
oidc_auth_enabled: !AppConfig::get().disable_oidc,
iso_mimetypes: &constants::ALLOWED_ISO_MIME_TYPES,
disk_images_mimetypes: &constants::ALLOWED_DISK_IMAGES_MIME_TYPES,
net_mac_prefix: constants::NET_MAC_ADDR_PREFIX,
builtin_nwfilter_rules: &constants::BUILTIN_NETWORK_FILTER_RULES,
nwfilter_chains: &constants::NETWORK_CHAINS,
constraints: ServerConstraints {
iso_max_size: constants::ISO_MAX_SIZE.as_bytes(),
disk_image_max_size: constants::DISK_IMAGE_MAX_SIZE.as_bytes(),
iso_max_size: constants::ISO_MAX_SIZE,

vnc_token_duration: VNC_TOKEN_LIFETIME,

vm_name_size: LenConstraints { min: 2, max: 50 },
vm_title_size: LenConstraints { min: 0, max: 50 },
group_id_size: LenConstraints { min: 3, max: 50 },
memory_size: LenConstraints {
min: constants::MIN_VM_MEMORY.as_bytes(),
max: constants::MAX_VM_MEMORY.as_bytes(),
min: constants::MIN_VM_MEMORY,
max: constants::MAX_VM_MEMORY,
},
disk_name_size: LenConstraints {
min: DISK_NAME_MIN_LEN,
max: DISK_NAME_MAX_LEN,
},
disk_size: LenConstraints {
min: DISK_SIZE_MIN.as_bytes(),
max: DISK_SIZE_MAX.as_bytes(),
min: DISK_SIZE_MIN,
max: DISK_SIZE_MAX,
},

disk_image_name_size: LenConstraints { min: 5, max: 220 },

net_name_size: LenConstraints { min: 2, max: 50 },
net_title_size: LenConstraints { min: 0, max: 50 },
net_nat_comment_size: LenConstraints {
@ -153,13 +134,16 @@ pub async fn server_info(client: LibVirtReq) -> HttpResult {
system.refresh_all();

let mut components = Components::new();
components.refresh(true);
components.refresh_list();
components.refresh();

let mut disks = Disks::new();
disks.refresh(true);
disks.refresh_list();
disks.refresh();

let mut networks = Networks::new();
networks.refresh(true);
networks.refresh_list();
networks.refresh();

Ok(HttpResponse::Ok().json(ServerInfo {
hypervisor: client.get_info().await?,
@ -187,7 +171,7 @@ pub async fn network_hook_status() -> HttpResult {

pub async fn number_vcpus() -> HttpResult {
let mut system = System::new();
system.refresh_cpu_all();
system.refresh_cpu();
let number_cpus = system.cpus().len();
assert_ne!(number_cpus, 0, "Got invlid number of CPU!");

@ -205,89 +189,3 @@ pub async fn number_vcpus() -> HttpResult {
pub async fn networks_list() -> HttpResult {
Ok(HttpResponse::Ok().json(net_utils::net_list()))
}

pub async fn bridges_list() -> HttpResult {
Ok(HttpResponse::Ok().json(net_utils::bridges_list()?))
}

/// Add JSON file to ZIP
fn zip_json<E: Serialize, F>(
zip: &mut ZipWriter<File>,
dir: &str,
content: &Vec<E>,
file_name: F,
) -> anyhow::Result<()>
where
F: Fn(&E) -> String,
{
for entry in content {
let file_encoded = serde_json::to_string(&entry)?;

let options = SimpleFileOptions::default()
.compression_method(zip::CompressionMethod::Deflated)
.unix_permissions(0o750);

zip.start_file(format!("{dir}/{}.json", file_name(entry)), options)?;
zip.write_all(file_encoded.as_bytes())?;
}
Ok(())
}

/// Export all configuration elements at once
pub async fn export_all_configs(req: HttpRequest, client: LibVirtReq) -> HttpResult {
// Perform extractions
let vms = client
.get_full_domains_list()
.await?
.into_iter()
.map(VMInfo::from_domain)
.collect::<Result<Vec<_>, _>>()?;
let networks = client
.get_full_networks_list()
.await?
.into_iter()
.map(NetworkInfo::from_xml)
.collect::<Result<Vec<_>, _>>()?;
let nw_filters = client
.get_full_network_filters_list()
.await?
.into_iter()
.map(NetworkFilter::lib2rest)
.collect::<Result<Vec<_>, _>>()?;
let tokens = api_tokens::full_list().await?;

// Create ZIP file
let dest_dir = tempfile::tempdir_in(&AppConfig::get().temp_dir)?;
let zip_path = dest_dir.path().join("export.zip");

let file = File::create(&zip_path)?;
let mut zip = ZipWriter::new(file);

// Encode entities to JSON
zip_json(&mut zip, "vms", &vms, |v| v.name.to_string())?;
zip_json(&mut zip, "networks", &networks, |v| v.name.0.to_string())?;
zip_json(
&mut zip,
"nw_filters",
&nw_filters,
|v| match constants::BUILTIN_NETWORK_FILTER_RULES.contains(&v.name.0.as_str()) {
true => format!("builtin/{}", v.name.0),
false => v.name.0.to_string(),
},
)?;
zip_json(&mut zip, "tokens", &tokens, |v| v.id.0.to_string())?;

// Finalize ZIP and return response
zip.finish()?;
let file = File::open(zip_path)?;

let file = NamedFile::from_file(
file,
format!(
"export_{}.zip",
format_date(time() as i64).unwrap().replace('/', "-")
),
)?;

Ok(file.into_response(&req))
}
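The zip_json helper in this hunk packs each exported entity into the archive as one deflate-compressed JSON entry. A minimal standalone sketch of that pattern, assuming the zip crate API shown above (SimpleFileOptions) and anyhow for error handling; the entry name and payload here are illustrative, not taken from the repository:

    use std::fs::File;
    use std::io::Write;
    use zip::write::SimpleFileOptions;
    use zip::ZipWriter;

    fn main() -> anyhow::Result<()> {
        // Create the archive and one compressed entry, mirroring the options used above
        let mut zip = ZipWriter::new(File::create("export.zip")?);
        let options = SimpleFileOptions::default()
            .compression_method(zip::CompressionMethod::Deflated)
            .unix_permissions(0o750);
        zip.start_file("vms/demo.json", options)?;
        zip.write_all(br#"{"name":"demo"}"#)?;
        zip.finish()?;
        Ok(())
    }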
@ -3,27 +3,6 @@ pub use serve_static_debug::{root_index, serve_static_content};
#[cfg(not(debug_assertions))]
pub use serve_static_release::{root_index, serve_static_content};

/// Static API assets hosting
pub mod serve_assets {
use actix_web::{HttpResponse, web};
use rust_embed::RustEmbed;

#[derive(RustEmbed)]
#[folder = "assets/"]
struct Asset;

/// Serve API assets
pub async fn serve_api_assets(file: web::Path<String>) -> HttpResponse {
match Asset::get(&file) {
None => HttpResponse::NotFound().body("File not found"),
Some(asset) => HttpResponse::Ok()
.content_type(asset.metadata.mimetype())
.body(asset.data),
}
}
}

/// Web asset hosting placeholder in debug mode
#[cfg(debug_assertions)]
mod serve_static_debug {
use actix_web::{HttpResponse, Responder};
@ -37,20 +16,19 @@ mod serve_static_debug {
}
}

/// Web asset hosting in release mode
#[cfg(not(debug_assertions))]
mod serve_static_release {
use actix_web::{HttpResponse, Responder, web};
use actix_web::{web, HttpResponse, Responder};
use rust_embed::RustEmbed;

#[derive(RustEmbed)]
#[folder = "static/"]
struct WebAsset;
struct Asset;

fn handle_embedded_file(path: &str, can_fallback: bool) -> HttpResponse {
match (WebAsset::get(path), can_fallback) {
match (Asset::get(path), can_fallback) {
(Some(content), _) => HttpResponse::Ok()
.content_type(content.metadata.mimetype())
.content_type(mime_guess::from_path(path).first_or_octet_stream().as_ref())
.body(content.data.into_owned()),
(None, false) => HttpResponse::NotFound().body("404 Not Found"),
(None, true) => handle_embedded_file("index.html", false),
@ -1,12 +1,11 @@
use crate::actors::vnc_handler;
use crate::actors::vnc_actor::VNCActor;
use crate::actors::vnc_tokens_actor::VNCTokensManager;
use crate::controllers::{HttpResult, LibVirtReq};
use crate::libvirt_lib_structures::XMLUuid;
use crate::libvirt_lib_structures::domain::DomainState;
use crate::libvirt_lib_structures::XMLUuid;
use crate::libvirt_rest_structures::vm::VMInfo;
use actix_web::{HttpRequest, HttpResponse, rt, web};
use std::path::Path;
use tokio::net::UnixStream;
use actix_web::{web, HttpRequest, HttpResponse};
use actix_web_actors::ws;

#[derive(serde::Serialize)]
struct VMInfoAndState {
@ -22,7 +21,7 @@ struct VMUuid {

/// Create a new VM
pub async fn create(client: LibVirtReq, req: web::Json<VMInfo>) -> HttpResult {
let domain = match req.0.as_domain() {
let domain = match req.0.as_tomain() {
Ok(d) => d,
Err(e) => {
log::error!("Failed to extract domain info! {e}");
@ -84,8 +83,6 @@ pub async fn get_single(client: LibVirtReq, id: web::Path<SingleVMUUidReq>) -> H
}
};

log::debug!("INFO={info:#?}");

let state = client.get_domain_state(id.uid).await?;

Ok(HttpResponse::Ok().json(VMInfoAndState {
@ -109,35 +106,13 @@ pub async fn get_single_src_def(client: LibVirtReq, id: web::Path<SingleVMUUidRe
.body(info))
}

/// Get the generated cloud init configuration disk of a vm
pub async fn get_cloud_init_disk(client: LibVirtReq, id: web::Path<SingleVMUUidReq>) -> HttpResult {
let info = match client.get_single_domain(id.uid).await {
Ok(i) => i,
Err(e) => {
log::error!("Failed to get domain information! {e}");
return Ok(HttpResponse::InternalServerError().json(e.to_string()));
}
};

let vm = VMInfo::from_domain(info)?;
let disk = vm.cloud_init.generate_nocloud_disk()?;

Ok(HttpResponse::Ok()
.content_type("application/x-iso9660-image")
.insert_header((
"Content-Disposition",
format!("attachment; filename=\"cloud_init_{}.iso\"", vm.name),
))
.body(disk))
}

/// Update a VM information
pub async fn update(
client: LibVirtReq,
id: web::Path<SingleVMUUidReq>,
req: web::Json<VMInfo>,
) -> HttpResult {
let mut domain = match req.0.as_domain() {
let mut domain = match req.0.as_tomain() {
Ok(d) => d,
Err(e) => {
log::error!("Failed to extract domain info! {e}");
@ -347,19 +322,5 @@ pub async fn vnc(
};

log::info!("Start VNC connection on socket {socket_path}");

let socket_path = Path::new(&socket_path);
if !socket_path.exists() {
log::error!("VNC socket path {socket_path:?} does not exist!");
return Ok(HttpResponse::ServiceUnavailable().json("VNC socket path does not exists!"));
}

let socket = UnixStream::connect(socket_path).await?;

let (res, session, msg_stream) = actix_ws::handle(&req, stream)?;

// spawn websocket handler (and don't await it) so that the response is returned immediately
rt::spawn(vnc_handler::handle(session, msg_stream, socket));

Ok(res)
Ok(ws::start(VNCActor::new(&socket_path).await?, &req, stream)?)
}
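One side of the vnc hunk above relies on actix_ws::handle, which completes the WebSocket handshake and returns the HTTP response right away while the socket bridging runs in a spawned task. A minimal sketch of that pattern with a plain echo loop, assuming actix-ws 0.3; the handler name and echo logic are illustrative, not part of this repository:

    use actix_web::{rt, web, Error, HttpRequest, HttpResponse};
    use actix_ws::Message;

    async fn ws_echo(req: HttpRequest, body: web::Payload) -> Result<HttpResponse, Error> {
        // Handshake: the response must be returned immediately
        let (response, mut session, mut stream) = actix_ws::handle(&req, body)?;

        // Bridge messages in a background task so the handler does not block
        rt::spawn(async move {
            while let Some(Ok(msg)) = stream.recv().await {
                if let Message::Text(text) = msg {
                    if session.text(text).await.is_err() {
                        break; // client went away
                    }
                }
            }
        });

        Ok(response)
    }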
@ -1,7 +1,7 @@
use actix_identity::Identity;
use actix_web::dev::Payload;
use actix_web::{Error, FromRequest, HttpMessage, HttpRequest};
use futures_util::future::{Ready, ready};
use futures_util::future::{ready, Ready};
use std::fmt::Display;

pub struct AuthExtractor {
@ -1,66 +0,0 @@
|
||||
use crate::controllers::LibVirtReq;
|
||||
use crate::libvirt_lib_structures::XMLUuid;
|
||||
use crate::libvirt_lib_structures::domain::DomainXML;
|
||||
use crate::libvirt_rest_structures::vm::VMGroupId;
|
||||
use actix_http::Payload;
|
||||
use actix_web::error::ErrorBadRequest;
|
||||
use actix_web::web::Query;
|
||||
use actix_web::{Error, FromRequest, HttpRequest, web};
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
|
||||
pub struct GroupVmIdExtractor(pub Vec<DomainXML>);
|
||||
|
||||
#[derive(serde::Deserialize)]
|
||||
struct GroupIDInPath {
|
||||
gid: VMGroupId,
|
||||
}
|
||||
|
||||
#[derive(serde::Deserialize)]
|
||||
struct FilterVM {
|
||||
vm_id: Option<XMLUuid>,
|
||||
}
|
||||
|
||||
impl FromRequest for GroupVmIdExtractor {
|
||||
type Error = Error;
|
||||
type Future = Pin<Box<dyn Future<Output = Result<Self, Self::Error>>>>;
|
||||
|
||||
fn from_request(req: &HttpRequest, _payload: &mut Payload) -> Self::Future {
|
||||
let req = req.clone();
|
||||
|
||||
Box::pin(async move {
|
||||
let Ok(group_id) =
|
||||
web::Path::<GroupIDInPath>::from_request(&req, &mut Payload::None).await
|
||||
else {
|
||||
return Err(ErrorBadRequest("Group ID not specified in path!"));
|
||||
};
|
||||
let group_id = group_id.into_inner().gid;
|
||||
|
||||
let filter_vm = match Query::<FilterVM>::from_request(&req, &mut Payload::None).await {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
log::error!("Failed to extract VM id from request! {e}");
|
||||
return Err(ErrorBadRequest("Failed to extract VM id from request!"));
|
||||
}
|
||||
};
|
||||
|
||||
let Ok(client) = LibVirtReq::from_request(&req, &mut Payload::None).await else {
|
||||
return Err(ErrorBadRequest("Failed to extract client handle!"));
|
||||
};
|
||||
|
||||
let vms = match client.get_full_group_vm_list(&group_id).await {
|
||||
Ok(vms) => vms,
|
||||
Err(e) => {
|
||||
log::error!("Failed to get the VMs of the group {group_id:?}: {e}");
|
||||
return Err(ErrorBadRequest("Failed to get the VMs of the group!"));
|
||||
}
|
||||
};
|
||||
|
||||
// Filter (if requested by the user)
|
||||
Ok(GroupVmIdExtractor(match filter_vm.vm_id {
|
||||
None => vms,
|
||||
Some(id) => vms.into_iter().filter(|vms| vms.uuid == Some(id)).collect(),
|
||||
}))
|
||||
})
|
||||
}
|
||||
}
|
@ -1,7 +1,7 @@
use crate::app_config::AppConfig;
use actix_web::dev::Payload;
use actix_web::{Error, FromRequest, HttpRequest};
use futures_util::future::{Ready, ready};
use futures_util::future::{ready, Ready};
use std::ops::Deref;

#[derive(Debug, Copy, Clone, PartialEq)]
@ -1,4 +1,3 @@
pub mod api_auth_extractor;
pub mod auth_extractor;
pub mod group_vm_id_extractor;
pub mod local_auth_extractor;
@ -1,15 +1,14 @@
|
||||
use crate::actors::libvirt_actor;
|
||||
use crate::actors::libvirt_actor::LibVirtActor;
|
||||
use crate::libvirt_lib_structures::XMLUuid;
|
||||
use crate::libvirt_lib_structures::domain::{DomainState, DomainXML};
|
||||
use crate::libvirt_lib_structures::network::NetworkXML;
|
||||
use crate::libvirt_lib_structures::nwfilter::NetworkFilterXML;
|
||||
use crate::libvirt_lib_structures::XMLUuid;
|
||||
use crate::libvirt_rest_structures::hypervisor::HypervisorInfo;
|
||||
use crate::libvirt_rest_structures::net::NetworkInfo;
|
||||
use crate::libvirt_rest_structures::nw_filter::NetworkFilter;
|
||||
use crate::libvirt_rest_structures::vm::{VMGroupId, VMInfo};
|
||||
use crate::libvirt_rest_structures::vm::VMInfo;
|
||||
use actix::Addr;
|
||||
use std::collections::HashSet;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct LibVirtClient(pub Addr<LibVirtActor>);
|
||||
@ -108,35 +107,6 @@ impl LibVirtClient {
|
||||
.await?
|
||||
}
|
||||
|
||||
/// Get the full list of groups
|
||||
pub async fn get_full_groups_list(&self) -> anyhow::Result<Vec<VMGroupId>> {
|
||||
let domains = self.get_full_domains_list().await?;
|
||||
let mut out = HashSet::new();
|
||||
for d in domains {
|
||||
if let Some(g) = VMInfo::from_domain(d)?.group {
|
||||
out.insert(g);
|
||||
}
|
||||
}
|
||||
let mut out: Vec<_> = out.into_iter().collect();
|
||||
out.sort();
|
||||
Ok(out)
|
||||
}
|
||||
|
||||
/// Get the full list of VMs of a given group
|
||||
pub async fn get_full_group_vm_list(
|
||||
&self,
|
||||
group: &VMGroupId,
|
||||
) -> anyhow::Result<Vec<DomainXML>> {
|
||||
let vms = self.get_full_domains_list().await?;
|
||||
let mut out = Vec::new();
|
||||
for vm in vms {
|
||||
if VMInfo::from_domain(vm.clone())?.group == Some(group.clone()) {
|
||||
out.push(vm);
|
||||
}
|
||||
}
|
||||
Ok(out)
|
||||
}
|
||||
|
||||
/// Update a network configuration
|
||||
pub async fn update_network(
|
||||
&self,
|
||||
|
@ -1,42 +1,17 @@
|
||||
use crate::libvirt_lib_structures::XMLUuid;
|
||||
use crate::utils::cloud_init_utils::CloudInitConfig;
|
||||
|
||||
/// VirtWeb specific metadata
|
||||
#[derive(serde::Serialize, serde::Deserialize, Default, Debug, Clone)]
|
||||
#[serde(rename = "virtweb", default)]
|
||||
pub struct DomainMetadataVirtWebXML {
|
||||
#[serde(rename = "@xmlns:virtweb", default)]
|
||||
pub ns: String,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub group: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub cloud_init: Option<CloudInitConfig>,
|
||||
}
|
||||
|
||||
/// Domain metadata
|
||||
#[derive(serde::Serialize, serde::Deserialize, Default, Debug, Clone)]
|
||||
#[serde(rename = "metadata")]
|
||||
pub struct DomainMetadataXML {
|
||||
#[serde(rename = "virtweb:metadata", default)]
|
||||
pub virtweb: DomainMetadataVirtWebXML,
|
||||
}
|
||||
|
||||
/// OS information
|
||||
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
|
||||
#[derive(serde::Serialize, serde::Deserialize)]
|
||||
#[serde(rename = "os")]
|
||||
pub struct OSXML {
|
||||
#[serde(rename = "@firmware", default, skip_serializing_if = "Option::is_none")]
|
||||
pub firmware: Option<String>,
|
||||
#[serde(rename = "@firmware", default)]
|
||||
pub firmware: String,
|
||||
pub r#type: OSTypeXML,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub loader: Option<OSLoaderXML>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub bootmenu: Option<OSBootMenuXML>,
|
||||
pub smbios: Option<OSSMBiosXML>,
|
||||
}
|
||||
|
||||
/// OS Type information
|
||||
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
|
||||
#[derive(serde::Serialize, serde::Deserialize)]
|
||||
#[serde(rename = "os")]
|
||||
pub struct OSTypeXML {
|
||||
#[serde(rename = "@arch")]
|
||||
@ -48,67 +23,47 @@ pub struct OSTypeXML {
|
||||
}
|
||||
|
||||
/// OS Loader information
|
||||
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
|
||||
#[derive(serde::Serialize, serde::Deserialize)]
|
||||
#[serde(rename = "loader")]
|
||||
pub struct OSLoaderXML {
|
||||
#[serde(rename = "@secure")]
|
||||
pub secure: String,
|
||||
}
|
||||
|
||||
/// Legacy boot menu information
|
||||
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
|
||||
#[serde(rename = "bootmenu")]
|
||||
pub struct OSBootMenuXML {
|
||||
#[serde(rename = "@enable")]
|
||||
pub enable: String,
|
||||
#[serde(rename = "@timeout")]
|
||||
pub timeout: usize,
|
||||
}
|
||||
|
||||
/// SMBIOS System information
|
||||
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
|
||||
#[serde(rename = "smbios")]
|
||||
pub struct OSSMBiosXML {
|
||||
#[serde(rename = "@mode")]
|
||||
pub mode: String,
|
||||
}
|
||||
|
||||
/// Hypervisor features
|
||||
#[derive(serde::Serialize, serde::Deserialize, Clone, Default, Debug)]
|
||||
#[derive(serde::Serialize, serde::Deserialize, Default)]
|
||||
#[serde(rename = "features")]
|
||||
pub struct FeaturesXML {
|
||||
pub acpi: ACPIXML,
|
||||
}
|
||||
|
||||
/// ACPI feature
|
||||
#[derive(serde::Serialize, serde::Deserialize, Clone, Default, Debug)]
|
||||
#[derive(serde::Serialize, serde::Deserialize, Default)]
|
||||
#[serde(rename = "acpi")]
|
||||
pub struct ACPIXML {}
|
||||
|
||||
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
|
||||
#[derive(serde::Serialize, serde::Deserialize)]
|
||||
#[serde(rename = "mac")]
|
||||
pub struct NetMacAddress {
|
||||
#[serde(rename = "@address")]
|
||||
pub address: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
|
||||
#[derive(serde::Serialize, serde::Deserialize)]
|
||||
#[serde(rename = "source")]
|
||||
pub struct NetIntSourceXML {
|
||||
#[serde(rename = "@network")]
|
||||
pub network: Option<String>,
|
||||
#[serde(rename = "@bridge")]
|
||||
pub bridge: Option<String>,
|
||||
pub network: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
|
||||
#[derive(serde::Serialize, serde::Deserialize)]
|
||||
#[serde(rename = "model")]
|
||||
pub struct NetIntModelXML {
|
||||
#[serde(rename = "@type")]
|
||||
pub r#type: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
|
||||
#[derive(serde::Serialize, serde::Deserialize)]
|
||||
#[serde(rename = "filterref")]
|
||||
pub struct NetIntFilterParameterXML {
|
||||
#[serde(rename = "@name")]
|
||||
@ -117,7 +72,7 @@ pub struct NetIntFilterParameterXML {
|
||||
pub value: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
|
||||
#[derive(serde::Serialize, serde::Deserialize)]
|
||||
#[serde(rename = "filterref")]
|
||||
pub struct NetIntfilterRefXML {
|
||||
#[serde(rename = "@filter")]
|
||||
@ -126,7 +81,7 @@ pub struct NetIntfilterRefXML {
|
||||
pub parameters: Vec<NetIntFilterParameterXML>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
|
||||
#[derive(serde::Serialize, serde::Deserialize)]
|
||||
#[serde(rename = "interface")]
|
||||
pub struct DomainNetInterfaceXML {
|
||||
#[serde(rename = "@type")]
|
||||
@ -140,14 +95,14 @@ pub struct DomainNetInterfaceXML {
|
||||
pub filterref: Option<NetIntfilterRefXML>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
|
||||
#[derive(serde::Serialize, serde::Deserialize)]
|
||||
#[serde(rename = "input")]
|
||||
pub struct DomainInputXML {
|
||||
#[serde(rename = "@type")]
|
||||
pub r#type: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
|
||||
#[derive(serde::Serialize, serde::Deserialize)]
|
||||
#[serde(rename = "backend")]
|
||||
pub struct TPMBackendXML {
|
||||
#[serde(rename = "@type")]
|
||||
@ -157,7 +112,7 @@ pub struct TPMBackendXML {
|
||||
pub r#version: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
|
||||
#[derive(serde::Serialize, serde::Deserialize)]
|
||||
#[serde(rename = "tpm")]
|
||||
pub struct TPMDeviceXML {
|
||||
#[serde(rename = "@model")]
|
||||
@ -166,7 +121,7 @@ pub struct TPMDeviceXML {
|
||||
}
|
||||
|
||||
/// Devices information
|
||||
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
|
||||
#[derive(serde::Serialize, serde::Deserialize)]
|
||||
#[serde(rename = "devices")]
|
||||
pub struct DevicesXML {
|
||||
/// Graphics (used for VNC)
|
||||
@ -195,7 +150,7 @@ pub struct DevicesXML {
|
||||
}
|
||||
|
||||
/// Graphics information
|
||||
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
|
||||
#[derive(serde::Serialize, serde::Deserialize)]
|
||||
#[serde(rename = "graphics")]
|
||||
pub struct GraphicsXML {
|
||||
#[serde(rename = "@type")]
|
||||
@ -205,14 +160,14 @@ pub struct GraphicsXML {
|
||||
}
|
||||
|
||||
/// Video device information
|
||||
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
|
||||
#[derive(serde::Serialize, serde::Deserialize)]
|
||||
#[serde(rename = "video")]
|
||||
pub struct VideoXML {
|
||||
pub model: VideoModelXML,
|
||||
}
|
||||
|
||||
/// Video model device information
|
||||
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
|
||||
#[derive(serde::Serialize, serde::Deserialize)]
|
||||
#[serde(rename = "model")]
|
||||
pub struct VideoModelXML {
|
||||
#[serde(rename = "@type")]
|
||||
@ -220,7 +175,7 @@ pub struct VideoModelXML {
|
||||
}
|
||||
|
||||
/// Disk information
|
||||
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
|
||||
#[derive(serde::Serialize, serde::Deserialize)]
|
||||
#[serde(rename = "disk")]
|
||||
pub struct DiskXML {
|
||||
#[serde(rename = "@type")]
|
||||
@ -238,7 +193,7 @@ pub struct DiskXML {
|
||||
pub address: Option<DiskAddressXML>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
|
||||
#[derive(serde::Serialize, serde::Deserialize)]
|
||||
#[serde(rename = "driver")]
|
||||
pub struct DiskDriverXML {
|
||||
#[serde(rename = "@name")]
|
||||
@ -249,14 +204,14 @@ pub struct DiskDriverXML {
|
||||
pub r#cache: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
|
||||
#[derive(serde::Serialize, serde::Deserialize)]
|
||||
#[serde(rename = "source")]
|
||||
pub struct DiskSourceXML {
|
||||
#[serde(rename = "@file")]
|
||||
pub file: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
|
||||
#[derive(serde::Serialize, serde::Deserialize)]
|
||||
#[serde(rename = "target")]
|
||||
pub struct DiskTargetXML {
|
||||
#[serde(rename = "@dev")]
|
||||
@ -265,18 +220,18 @@ pub struct DiskTargetXML {
|
||||
pub bus: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
|
||||
#[derive(serde::Serialize, serde::Deserialize)]
|
||||
#[serde(rename = "readonly")]
|
||||
pub struct DiskReadOnlyXML {}
|
||||
|
||||
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
|
||||
#[derive(serde::Serialize, serde::Deserialize)]
|
||||
#[serde(rename = "boot")]
|
||||
pub struct DiskBootXML {
|
||||
#[serde(rename = "@order")]
|
||||
pub order: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
|
||||
#[derive(serde::Serialize, serde::Deserialize)]
|
||||
#[serde(rename = "address")]
|
||||
pub struct DiskAddressXML {
|
||||
#[serde(rename = "@type")]
|
||||
@ -296,7 +251,7 @@ pub struct DiskAddressXML {
|
||||
}
|
||||
|
||||
/// Domain RAM information
|
||||
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
|
||||
#[derive(serde::Serialize, serde::Deserialize)]
|
||||
#[serde(rename = "memory")]
|
||||
pub struct DomainMemoryXML {
|
||||
#[serde(rename = "@unit")]
|
||||
@ -306,7 +261,7 @@ pub struct DomainMemoryXML {
|
||||
pub memory: usize,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
|
||||
#[derive(serde::Serialize, serde::Deserialize)]
|
||||
#[serde(rename = "topology")]
|
||||
pub struct DomainCPUTopology {
|
||||
#[serde(rename = "@sockets")]
|
||||
@ -317,14 +272,14 @@ pub struct DomainCPUTopology {
|
||||
pub threads: usize,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
|
||||
#[derive(serde::Serialize, serde::Deserialize)]
|
||||
#[serde(rename = "cpu")]
|
||||
pub struct DomainVCPUXML {
|
||||
#[serde(rename = "$value")]
|
||||
pub body: usize,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
|
||||
#[derive(serde::Serialize, serde::Deserialize)]
|
||||
#[serde(rename = "cpu")]
|
||||
pub struct DomainCPUXML {
|
||||
#[serde(rename = "@mode")]
|
||||
@ -332,31 +287,8 @@ pub struct DomainCPUXML {
|
||||
pub topology: Option<DomainCPUTopology>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
|
||||
#[serde(rename = "entry")]
|
||||
pub struct OEMStringEntryXML {
|
||||
#[serde(rename = "$text", default)]
|
||||
pub content: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
|
||||
#[serde(rename = "oemStrings")]
|
||||
pub struct OEMStringsXML {
|
||||
#[serde(rename = "entry")]
|
||||
pub entries: Vec<OEMStringEntryXML>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
|
||||
#[serde(rename = "sysinfo")]
|
||||
pub struct SysInfoXML {
|
||||
#[serde(rename = "@type")]
|
||||
pub r#type: String,
|
||||
#[serde(rename = "oemStrings")]
|
||||
pub oem_strings: Option<OEMStringsXML>,
|
||||
}
|
||||
|
||||
/// Domain information, see https://libvirt.org/formatdomain.html
|
||||
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
|
||||
#[derive(serde::Serialize, serde::Deserialize)]
|
||||
#[serde(rename = "domain")]
|
||||
pub struct DomainXML {
|
||||
/// Domain type (kvm)
|
||||
@ -368,9 +300,6 @@ pub struct DomainXML {
|
||||
pub genid: Option<uuid::Uuid>,
|
||||
pub title: Option<String>,
|
||||
pub description: Option<String>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub metadata: Option<DomainMetadataXML>,
|
||||
|
||||
pub os: OSXML,
|
||||
#[serde(default)]
|
||||
pub features: FeaturesXML,
|
||||
@ -385,41 +314,15 @@ pub struct DomainXML {
|
||||
/// CPU information
|
||||
pub cpu: DomainCPUXML,
|
||||
|
||||
/// SMBios strings
|
||||
pub sysinfo: Option<SysInfoXML>,
|
||||
|
||||
/// Behavior when guest state change
|
||||
pub on_poweroff: String,
|
||||
pub on_reboot: String,
|
||||
pub on_crash: String,
|
||||
}
|
||||
|
||||
const METADATA_START_MARKER: &str =
|
||||
"<virtweb:metadata xmlns:virtweb=\"https://virtweb.communiquons.org\">";
|
||||
const METADATA_END_MARKER: &str = "</virtweb:metadata>";
|
||||
|
||||
impl DomainXML {
|
||||
/// Decode Domain structure from XML definition
|
||||
pub fn parse_xml(xml: &str) -> anyhow::Result<Self> {
|
||||
let mut res: Self = quick_xml::de::from_str(xml)?;
|
||||
|
||||
// Handle custom metadata parsing issue
|
||||
//
|
||||
// https://github.com/tafia/quick-xml/pull/797
|
||||
if xml.contains(METADATA_START_MARKER) && xml.contains(METADATA_END_MARKER) {
|
||||
let s = xml
|
||||
.split_once(METADATA_START_MARKER)
|
||||
.unwrap()
|
||||
.1
|
||||
.split_once(METADATA_END_MARKER)
|
||||
.unwrap()
|
||||
.0;
|
||||
let s = format!("<virtweb>{s}</virtweb>");
|
||||
let metadata: DomainMetadataVirtWebXML = quick_xml::de::from_str(&s)?;
|
||||
res.metadata = Some(DomainMetadataXML { virtweb: metadata });
|
||||
}
|
||||
|
||||
Ok(res)
|
||||
Ok(quick_xml::de::from_str(xml)?)
|
||||
}
|
||||
|
||||
/// Turn this domain into its XML definition
|
||||
|
@ -1,4 +1,4 @@
#[derive(serde::Serialize, serde::Deserialize, Clone, Copy, Debug, Eq, PartialEq, Hash)]
#[derive(serde::Serialize, serde::Deserialize, Clone, Copy, Debug)]
pub struct XMLUuid(pub uuid::Uuid);

impl XMLUuid {
@ -13,6 +13,4 @@ enum LibVirtStructError {
ParseFilteringChain(String),
#[error("NetworkFilterExtractionError: {0}")]
NetworkFilterExtraction(String),
#[error("CloudInitConfigurationError: {0}")]
CloudInitConfiguration(String),
}
@ -1,5 +1,5 @@
use crate::libvirt_lib_structures::XMLUuid;
use crate::libvirt_lib_structures::network::*;
use crate::libvirt_lib_structures::XMLUuid;
use crate::libvirt_rest_structures::LibVirtStructError::StructureExtraction;
use crate::nat::nat_definition::Nat;
use crate::nat::nat_lib;
@ -1,9 +1,9 @@
use crate::libvirt_lib_structures::XMLUuid;
use crate::libvirt_lib_structures::nwfilter::{
NetworkFilterRefXML, NetworkFilterRuleProtocolAllXML, NetworkFilterRuleProtocolArpXML,
NetworkFilterRuleProtocolIpvx, NetworkFilterRuleProtocolLayer4, NetworkFilterRuleProtocolMac,
NetworkFilterRuleXML, NetworkFilterXML,
};
use crate::libvirt_lib_structures::XMLUuid;
use crate::libvirt_rest_structures::LibVirtStructError;
use crate::libvirt_rest_structures::LibVirtStructError::{
NetworkFilterExtraction, StructureExtraction,
@ -1,26 +1,17 @@
|
||||
use crate::app_config::AppConfig;
|
||||
use crate::constants;
|
||||
use crate::libvirt_lib_structures::XMLUuid;
|
||||
use crate::libvirt_lib_structures::domain::*;
|
||||
use crate::libvirt_lib_structures::XMLUuid;
|
||||
use crate::libvirt_rest_structures::LibVirtStructError;
|
||||
use crate::libvirt_rest_structures::LibVirtStructError::{
|
||||
CloudInitConfiguration, StructureExtraction,
|
||||
};
|
||||
use crate::utils::cloud_init_utils::CloudInitConfig;
|
||||
use crate::utils::file_size_utils::FileSize;
|
||||
use crate::libvirt_rest_structures::LibVirtStructError::StructureExtraction;
|
||||
use crate::utils::disks_utils::Disk;
|
||||
use crate::utils::files_utils;
|
||||
use crate::utils::vm_file_disks_utils::{VMDiskBus, VMDiskFormat, VMFileDisk};
|
||||
use crate::utils::files_utils::convert_size_unit_to_mb;
|
||||
use lazy_regex::regex;
|
||||
use num::Integer;
|
||||
|
||||
#[derive(
|
||||
Debug, Clone, serde::Serialize, serde::Deserialize, PartialEq, Eq, Hash, Ord, PartialOrd,
|
||||
)]
|
||||
pub struct VMGroupId(pub String);
|
||||
|
||||
#[derive(serde::Serialize, serde::Deserialize)]
|
||||
pub enum BootType {
|
||||
Legacy,
|
||||
UEFI,
|
||||
UEFISecureBoot,
|
||||
}
|
||||
@ -33,12 +24,6 @@ pub enum VMArchitecture {
|
||||
X86_64,
|
||||
}
|
||||
|
||||
#[derive(serde::Serialize, serde::Deserialize)]
|
||||
pub enum NetworkInterfaceModelType {
|
||||
Virtio,
|
||||
E1000,
|
||||
}
|
||||
|
||||
#[derive(serde::Serialize, serde::Deserialize)]
|
||||
pub struct NWFilterParam {
|
||||
name: String,
|
||||
@ -56,7 +41,6 @@ pub struct Network {
|
||||
#[serde(flatten)]
|
||||
r#type: NetworkType,
|
||||
mac: String,
|
||||
model: NetworkInterfaceModelType,
|
||||
nwfilterref: Option<NWFilterRef>,
|
||||
}
|
||||
|
||||
@ -64,8 +48,7 @@ pub struct Network {
|
||||
#[serde(tag = "type")]
|
||||
pub enum NetworkType {
|
||||
UserspaceSLIRPStack,
|
||||
DefinedNetwork { network: String },
|
||||
Bridge { bridge: String },
|
||||
DefinedNetwork { network: String }, // TODO : complete network types
|
||||
}
|
||||
|
||||
#[derive(serde::Serialize, serde::Deserialize)]
|
||||
@ -76,35 +59,27 @@ pub struct VMInfo {
|
||||
pub genid: Option<XMLUuid>,
|
||||
pub title: Option<String>,
|
||||
pub description: Option<String>,
|
||||
/// Group associated with the VM (VirtWeb specific field)
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub group: Option<VMGroupId>,
|
||||
pub boot_type: BootType,
|
||||
pub architecture: VMArchitecture,
|
||||
/// VM allocated RAM memory
|
||||
pub memory: FileSize,
|
||||
/// VM allocated memory, in megabytes
|
||||
pub memory: usize,
|
||||
/// Number of vCPU for the VM
|
||||
pub number_vcpu: usize,
|
||||
/// Enable VNC access through admin console
|
||||
pub vnc_access: bool,
|
||||
/// Attach ISO file(s)
|
||||
pub iso_files: Vec<String>,
|
||||
/// File Storage - https://access.redhat.com/documentation/fr-fr/red_hat_enterprise_linux/6/html/virtualization_administration_guide/sect-virtualization-virtualized_block_devices-adding_storage_devices_to_guests#sect-Virtualization-Adding_storage_devices_to_guests-Adding_file_based_storage_to_a_guest
|
||||
pub file_disks: Vec<VMFileDisk>,
|
||||
/// Storage - https://access.redhat.com/documentation/fr-fr/red_hat_enterprise_linux/6/html/virtualization_administration_guide/sect-virtualization-virtualized_block_devices-adding_storage_devices_to_guests#sect-Virtualization-Adding_storage_devices_to_guests-Adding_file_based_storage_to_a_guest
|
||||
pub disks: Vec<Disk>,
|
||||
/// Network cards
|
||||
pub networks: Vec<Network>,
|
||||
/// Add a TPM v2.0 module
|
||||
pub tpm_module: bool,
|
||||
/// Strings injected as OEM Strings in SMBios configuration
|
||||
pub oem_strings: Vec<String>,
|
||||
/// Cloud init configuration
|
||||
#[serde(default)]
|
||||
pub cloud_init: CloudInitConfig,
|
||||
}
|
||||
|
||||
impl VMInfo {
|
||||
/// Turn this VM into a domain
|
||||
pub fn as_domain(&self) -> anyhow::Result<DomainXML> {
|
||||
pub fn as_tomain(&self) -> anyhow::Result<DomainXML> {
|
||||
if !regex!("^[a-zA-Z0-9]+$").is_match(&self.name) {
|
||||
return Err(StructureExtraction("VM name is invalid!").into());
|
||||
}
|
||||
@ -130,12 +105,6 @@ impl VMInfo {
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(group) = &self.group {
|
||||
if !regex!("^[a-zA-Z0-9]+$").is_match(&group.0) {
|
||||
return Err(StructureExtraction("VM group name is invalid!").into());
|
||||
}
|
||||
}
|
||||
|
||||
if self.memory < constants::MIN_VM_MEMORY || self.memory > constants::MAX_VM_MEMORY {
|
||||
return Err(StructureExtraction("VM memory is invalid!").into());
|
||||
}
|
||||
@ -144,26 +113,8 @@ impl VMInfo {
|
||||
return Err(StructureExtraction("Invalid number of vCPU specified!").into());
|
||||
}
|
||||
|
||||
if let Some(e) = self.cloud_init.check_error() {
|
||||
return Err(CloudInitConfiguration(e).into());
|
||||
}
|
||||
let mut disks = vec![];
|
||||
|
||||
let mut iso_absolute_files = vec![];
|
||||
|
||||
// Process cloud init image
|
||||
if self.cloud_init.attach_config {
|
||||
let cloud_init_disk_path = AppConfig::get().cloud_init_disk_path_for_vm(&self.name);
|
||||
|
||||
// Apply latest cloud init configuration
|
||||
std::fs::write(
|
||||
&cloud_init_disk_path,
|
||||
self.cloud_init.generate_nocloud_disk()?,
|
||||
)?;
|
||||
|
||||
iso_absolute_files.push(cloud_init_disk_path);
|
||||
}
|
||||
|
||||
// Process uploaded ISO files
|
||||
for iso_file in &self.iso_files {
|
||||
if !files_utils::check_file_name(iso_file) {
|
||||
return Err(StructureExtraction("ISO filename is invalid!").into());
|
||||
@ -175,13 +126,6 @@ impl VMInfo {
|
||||
return Err(StructureExtraction("Specified ISO file does not exists!").into());
|
||||
}
|
||||
|
||||
iso_absolute_files.push(path);
|
||||
}
|
||||
|
||||
let mut disks = vec![];
|
||||
|
||||
// Add ISO disk files
|
||||
for iso_path in iso_absolute_files {
|
||||
disks.push(DiskXML {
|
||||
r#type: "file".to_string(),
|
||||
device: "cdrom".to_string(),
|
||||
@ -191,7 +135,7 @@ impl VMInfo {
|
||||
cache: "none".to_string(),
|
||||
},
|
||||
source: DiskSourceXML {
|
||||
file: iso_path.to_string_lossy().to_string(),
|
||||
file: path.to_string_lossy().to_string(),
|
||||
},
|
||||
target: DiskTargetXML {
|
||||
dev: format!(
|
||||
@ -208,7 +152,6 @@ impl VMInfo {
|
||||
})
|
||||
}
|
||||
|
||||
// Configure VNC access, if requested
|
||||
let (vnc_graphics, vnc_video) = match self.vnc_access {
|
||||
true => (
|
||||
Some(GraphicsXML {
|
||||
@ -235,11 +178,7 @@ impl VMInfo {
|
||||
};
|
||||
|
||||
let model = Some(NetIntModelXML {
|
||||
r#type: match n.model {
|
||||
NetworkInterfaceModelType::Virtio => "virtio",
|
||||
NetworkInterfaceModelType::E1000 => "e1000",
|
||||
}
|
||||
.to_string(),
|
||||
r#type: "virtio".to_string(),
|
||||
});
|
||||
|
||||
let filterref = if let Some(n) = &n.nwfilterref {
|
||||
@ -284,18 +223,7 @@ impl VMInfo {
|
||||
mac,
|
||||
r#type: "network".to_string(),
|
||||
source: Some(NetIntSourceXML {
|
||||
network: Some(network.to_string()),
|
||||
bridge: None,
|
||||
}),
|
||||
model,
|
||||
filterref,
|
||||
},
|
||||
NetworkType::Bridge { bridge } => DomainNetInterfaceXML {
|
||||
r#type: "bridge".to_string(),
|
||||
mac,
|
||||
source: Some(NetIntSourceXML {
|
||||
network: None,
|
||||
bridge: Some(bridge.to_string()),
|
||||
network: network.to_string(),
|
||||
}),
|
||||
model,
|
||||
filterref,
|
||||
@ -304,21 +232,15 @@ impl VMInfo {
|
||||
}
|
||||
|
||||
// Check disks name for duplicates
|
||||
for disk in &self.file_disks {
|
||||
if self
|
||||
.file_disks
|
||||
.iter()
|
||||
.filter(|d| d.name == disk.name)
|
||||
.count()
|
||||
> 1
|
||||
{
|
||||
for disk in &self.disks {
|
||||
if self.disks.iter().filter(|d| d.name == disk.name).count() > 1 {
|
||||
return Err(StructureExtraction("Two different disks have the same name!").into());
|
||||
}
|
||||
}
|
||||
|
||||
// Apply disks configuration. Starting from now, the function should ideally never fail due to
|
||||
// bad user input
|
||||
for disk in &self.file_disks {
|
||||
for disk in &self.disks {
|
||||
disk.check_config()?;
|
||||
disk.apply_config(uuid)?;
|
||||
|
||||
@ -331,10 +253,7 @@ impl VMInfo {
|
||||
device: "disk".to_string(),
|
||||
driver: DiskDriverXML {
|
||||
name: "qemu".to_string(),
|
||||
r#type: match disk.format {
|
||||
VMDiskFormat::Raw { .. } => "raw".to_string(),
|
||||
VMDiskFormat::QCow2 => "qcow2".to_string(),
|
||||
},
|
||||
r#type: "raw".to_string(),
|
||||
cache: "none".to_string(),
|
||||
},
|
||||
source: DiskSourceXML {
|
||||
@ -345,11 +264,7 @@ impl VMInfo {
|
||||
"vd{}",
|
||||
["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l"][disks.len()]
|
||||
),
|
||||
bus: match disk.bus {
|
||||
VMDiskBus::Virtio => "virtio",
|
||||
VMDiskBus::SATA => "sata",
|
||||
}
|
||||
.to_string(),
|
||||
bus: "virtio".to_string(),
|
||||
},
|
||||
readonly: None,
|
||||
boot: DiskBootXML {
|
||||
@ -367,13 +282,6 @@ impl VMInfo {
|
||||
title: self.title.clone(),
|
||||
description: self.description.clone(),
|
||||
|
||||
metadata: Some(DomainMetadataXML {
|
||||
virtweb: DomainMetadataVirtWebXML {
|
||||
ns: "https://virtweb.communiquons.org".to_string(),
|
||||
group: self.group.clone().map(|g| g.0),
|
||||
cloud_init: Some(self.cloud_init.clone()),
|
||||
},
|
||||
}),
|
||||
os: OSXML {
|
||||
r#type: OSTypeXML {
|
||||
arch: match self.architecture {
|
||||
@ -384,28 +292,12 @@ impl VMInfo {
|
||||
machine: "q35".to_string(),
|
||||
body: "hvm".to_string(),
|
||||
},
|
||||
firmware: match self.boot_type {
|
||||
BootType::Legacy => None,
|
||||
_ => Some("efi".to_string()),
|
||||
},
|
||||
loader: match self.boot_type {
|
||||
BootType::Legacy => None,
|
||||
_ => Some(OSLoaderXML {
|
||||
secure: match self.boot_type {
|
||||
BootType::UEFISecureBoot => "yes".to_string(),
|
||||
_ => "no".to_string(),
|
||||
},
|
||||
}),
|
||||
},
|
||||
bootmenu: match self.boot_type {
|
||||
BootType::Legacy => Some(OSBootMenuXML {
|
||||
enable: "yes".to_string(),
|
||||
timeout: 3000,
|
||||
}),
|
||||
_ => None,
|
||||
},
|
||||
smbios: Some(OSSMBiosXML {
|
||||
mode: "sysinfo".to_string(),
|
||||
firmware: "efi".to_string(),
|
||||
loader: Some(OSLoaderXML {
|
||||
secure: match self.boot_type {
|
||||
BootType::UEFI => "no".to_string(),
|
||||
BootType::UEFISecureBoot => "yes".to_string(),
|
||||
},
|
||||
}),
|
||||
},
|
||||
|
||||
@ -441,7 +333,7 @@ impl VMInfo {
|
||||
|
||||
memory: DomainMemoryXML {
|
||||
unit: "MB".to_string(),
|
||||
memory: self.memory.as_mb(),
|
||||
memory: self.memory,
|
||||
},
|
||||
|
||||
vcpu: DomainVCPUXML {
|
||||
@ -463,17 +355,6 @@ impl VMInfo {
|
||||
}),
|
||||
},
|
||||
|
||||
sysinfo: Some(SysInfoXML {
|
||||
r#type: "smbios".to_string(),
|
||||
oem_strings: Some(OEMStringsXML {
|
||||
entries: self
|
||||
.oem_strings
|
||||
.iter()
|
||||
.map(|s| OEMStringEntryXML { content: s.clone() })
|
||||
.collect(),
|
||||
}),
|
||||
}),
|
||||
|
||||
on_poweroff: "destroy".to_string(),
|
||||
on_reboot: "restart".to_string(),
|
||||
on_crash: "destroy".to_string(),
|
||||
@ -488,17 +369,9 @@ impl VMInfo {
|
||||
genid: domain.genid.map(XMLUuid),
|
||||
title: domain.title,
|
||||
description: domain.description,
|
||||
group: domain
|
||||
.metadata
|
||||
.clone()
|
||||
.unwrap_or_default()
|
||||
.virtweb
|
||||
.group
|
||||
.map(VMGroupId),
|
||||
boot_type: match (domain.os.loader, domain.os.bootmenu) {
|
||||
(_, Some(_)) => BootType::Legacy,
|
||||
(None, _) => BootType::UEFI,
|
||||
(Some(l), _) => match l.secure.as_str() {
|
||||
boot_type: match domain.os.loader {
|
||||
None => BootType::UEFI,
|
||||
Some(l) => match l.secure.as_str() {
|
||||
"yes" => BootType::UEFISecureBoot,
|
||||
_ => BootType::UEFI,
|
||||
},
|
||||
@ -514,7 +387,7 @@ impl VMInfo {
|
||||
}
|
||||
},
|
||||
number_vcpu: domain.vcpu.body,
|
||||
memory: FileSize::from_size_unit(&domain.memory.unit, domain.memory.memory)?,
|
||||
memory: convert_size_unit_to_mb(&domain.memory.unit, domain.memory.memory)?,
|
||||
vnc_access: domain.devices.graphics.is_some(),
|
||||
iso_files: domain
|
||||
.devices
|
||||
@ -522,18 +395,14 @@ impl VMInfo {
|
||||
.iter()
|
||||
.filter(|d| d.device == "cdrom")
|
||||
.map(|d| d.source.file.rsplit_once('/').unwrap().1.to_string())
|
||||
.filter(|d| !d.starts_with(constants::CLOUD_INIT_IMAGE_PREFIX_NAME))
|
||||
.collect(),
|
||||
|
||||
file_disks: domain
|
||||
disks: domain
|
||||
.devices
|
||||
.disks
|
||||
.iter()
|
||||
.filter(|d| d.device == "disk")
|
||||
.map(|d| {
|
||||
VMFileDisk::load_from_file(&d.source.file, &d.target.bus)
|
||||
.expect("Failed to load file disk information!")
|
||||
})
|
||||
.map(|d| Disk::load_from_file(&d.source.file).unwrap())
|
||||
.collect(),
|
||||
|
||||
networks: domain
|
||||
@ -546,34 +415,7 @@ impl VMInfo {
|
||||
r#type: match d.r#type.as_str() {
|
||||
"user" => NetworkType::UserspaceSLIRPStack,
|
||||
"network" => NetworkType::DefinedNetwork {
|
||||
network: d
|
||||
.source
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.network
|
||||
.as_deref()
|
||||
.ok_or_else(|| {
|
||||
LibVirtStructError::DomainExtraction(
|
||||
"Missing source network for defined network!"
|
||||
.to_string(),
|
||||
)
|
||||
})?
|
||||
.to_string(),
|
||||
},
|
||||
"bridge" => NetworkType::Bridge {
|
||||
bridge: d
|
||||
.source
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.bridge
|
||||
.as_deref()
|
||||
.ok_or_else(|| {
|
||||
LibVirtStructError::DomainExtraction(
|
||||
"Missing bridge name for bridge connection!"
|
||||
.to_string(),
|
||||
)
|
||||
})?
|
||||
.to_string(),
|
||||
network: d.source.as_ref().unwrap().network.to_string(),
|
||||
},
|
||||
a => {
|
||||
return Err(LibVirtStructError::DomainExtraction(format!(
|
||||
@ -581,18 +423,6 @@ impl VMInfo {
|
||||
)));
|
||||
}
|
||||
},
|
||||
model: match d.model.as_ref() {
|
||||
None => NetworkInterfaceModelType::Virtio,
|
||||
Some(model) => match model.r#type.as_str() {
|
||||
"virtio" => NetworkInterfaceModelType::Virtio,
|
||||
"e1000" => NetworkInterfaceModelType::E1000,
|
||||
model => {
|
||||
return Err(LibVirtStructError::DomainExtraction(format!(
|
||||
"Unknown network interface model type: {model}! "
|
||||
)));
|
||||
}
|
||||
},
|
||||
},
|
||||
nwfilterref: d.filterref.as_ref().map(|f| NWFilterRef {
|
||||
name: f.filter.to_string(),
|
||||
parameters: f
|
||||
@ -609,19 +439,6 @@ impl VMInfo {
|
||||
.collect::<Result<Vec<_>, _>>()?,
|
||||
|
||||
tpm_module: domain.devices.tpm.is_some(),
|
||||
|
||||
oem_strings: domain
|
||||
.sysinfo
|
||||
.and_then(|s| s.oem_strings)
|
||||
.map(|s| s.entries.iter().map(|o| o.content.to_string()).collect())
|
||||
.unwrap_or_default(),
|
||||
cloud_init: domain
|
||||
.metadata
|
||||
.clone()
|
||||
.unwrap_or_default()
|
||||
.virtweb
|
||||
.cloud_init
|
||||
.unwrap_or_default(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@ -1,19 +1,18 @@
|
||||
use actix::Actor;
|
||||
use actix_cors::Cors;
|
||||
use actix_identity::IdentityMiddleware;
|
||||
use actix_identity::config::LogoutBehaviour;
|
||||
use actix_multipart::form::MultipartFormConfig;
|
||||
use actix_identity::IdentityMiddleware;
|
||||
use actix_multipart::form::tempfile::TempFileConfig;
|
||||
use actix_multipart::form::MultipartFormConfig;
|
||||
use actix_remote_ip::RemoteIPConfig;
|
||||
use actix_session::SessionMiddleware;
|
||||
use actix_session::storage::CookieSessionStore;
|
||||
use actix_session::SessionMiddleware;
|
||||
use actix_web::cookie::{Key, SameSite};
|
||||
use actix_web::http::header;
|
||||
use actix_web::middleware::Logger;
|
||||
use actix_web::web::Data;
|
||||
use actix_web::{App, HttpServer, web};
|
||||
use actix_web::{web, App, HttpServer};
|
||||
use light_openid::basic_state_manager::BasicStateManager;
|
||||
use std::cmp::max;
|
||||
use std::time::Duration;
|
||||
use virtweb_backend::actors::libvirt_actor::LibVirtActor;
|
||||
use virtweb_backend::actors::vnc_tokens_actor::VNCTokensManager;
|
||||
@ -23,14 +22,13 @@ use virtweb_backend::constants::{
|
||||
MAX_INACTIVITY_DURATION, MAX_SESSION_DURATION, SESSION_COOKIE_NAME,
|
||||
};
|
||||
use virtweb_backend::controllers::{
|
||||
api_tokens_controller, auth_controller, disk_images_controller, groups_controller,
|
||||
iso_controller, network_controller, nwfilter_controller, server_controller, static_controller,
|
||||
vm_controller,
|
||||
api_tokens_controller, auth_controller, iso_controller, network_controller,
|
||||
nwfilter_controller, server_controller, static_controller, vm_controller,
|
||||
};
|
||||
use virtweb_backend::libvirt_client::LibVirtClient;
|
||||
use virtweb_backend::middlewares::auth_middleware::AuthChecker;
|
||||
use virtweb_backend::nat::nat_conf_mode;
|
||||
use virtweb_backend::utils::{exec_utils, files_utils};
|
||||
use virtweb_backend::utils::files_utils;
|
||||
|
||||
#[actix_web::main]
|
||||
async fn main() -> std::io::Result<()> {
|
||||
@ -45,29 +43,11 @@ async fn main() -> std::io::Result<()> {
|
||||
// Load additional config from file, if requested
|
||||
AppConfig::parse_env_file().unwrap();
|
||||
|
||||
log::debug!("Checking for required programs");
|
||||
exec_utils::check_program(
|
||||
constants::PROGRAM_QEMU_IMAGE,
|
||||
"QEMU disk image utility is required to manipulate QCow2 files!",
|
||||
);
|
||||
exec_utils::check_program(
|
||||
constants::PROGRAM_IP,
|
||||
"ip is required to access bridges information!",
|
||||
);
|
||||
exec_utils::check_program(
|
||||
constants::PROGRAM_CLOUD_LOCALDS,
|
||||
"cloud-localds from package cloud-image-utils is required to build cloud-init images!",
|
||||
);
|
||||
|
||||
log::debug!("Create required directory, if missing");
|
||||
files_utils::create_directory_if_missing(AppConfig::get().iso_storage_path()).unwrap();
|
||||
files_utils::create_directory_if_missing(AppConfig::get().cloud_init_disk_storage_path())
|
||||
.unwrap();
|
||||
files_utils::create_directory_if_missing(AppConfig::get().disk_images_storage_path()).unwrap();
|
||||
files_utils::create_directory_if_missing(AppConfig::get().vnc_sockets_path()).unwrap();
|
||||
files_utils::set_file_permission(AppConfig::get().vnc_sockets_path(), 0o777).unwrap();
|
||||
files_utils::create_directory_if_missing(AppConfig::get().root_vm_disks_storage_path())
|
||||
.unwrap();
|
||||
files_utils::create_directory_if_missing(AppConfig::get().disks_storage_path()).unwrap();
|
||||
files_utils::create_directory_if_missing(AppConfig::get().nat_path()).unwrap();
|
||||
files_utils::create_directory_if_missing(AppConfig::get().definitions_path()).unwrap();
|
||||
files_utils::create_directory_if_missing(AppConfig::get().api_tokens_path()).unwrap();
|
||||
@ -128,9 +108,7 @@ async fn main() -> std::io::Result<()> {
|
||||
}))
|
||||
.app_data(conn.clone())
|
||||
// Uploaded files
|
||||
.app_data(MultipartFormConfig::default().total_limit(
|
||||
max(constants::DISK_IMAGE_MAX_SIZE, constants::ISO_MAX_SIZE).as_bytes(),
|
||||
))
|
||||
.app_data(MultipartFormConfig::default().total_limit(constants::ISO_MAX_SIZE))
|
||||
.app_data(TempFileConfig::default().directory(&AppConfig::get().temp_dir))
|
||||
// Server controller
|
||||
.route(
|
||||
@ -153,14 +131,6 @@ async fn main() -> std::io::Result<()> {
|
||||
"/api/server/networks",
|
||||
web::get().to(server_controller::networks_list),
|
||||
)
|
||||
.route(
|
||||
"/api/server/bridges",
|
||||
web::get().to(server_controller::bridges_list),
|
||||
)
|
||||
.route(
|
||||
"/api/server/export_configs",
|
||||
web::get().to(server_controller::export_all_configs),
|
||||
)
|
||||
// Auth controller
|
||||
.route(
|
||||
"/api/auth/local",
|
||||
@ -208,10 +178,6 @@ async fn main() -> std::io::Result<()> {
|
||||
"/api/vm/{uid}/src",
|
||||
web::get().to(vm_controller::get_single_src_def),
|
||||
)
|
||||
.route(
|
||||
"/api/vm/{uid}/cloud_init_disk",
|
||||
web::get().to(vm_controller::get_cloud_init_disk),
|
||||
)
|
||||
.route(
|
||||
"/api/vm/{uid}/autostart",
|
||||
web::get().to(vm_controller::get_autostart),
|
||||
@ -244,44 +210,6 @@ async fn main() -> std::io::Result<()> {
|
||||
web::get().to(vm_controller::vnc_token),
|
||||
)
|
||||
.route("/api/vnc", web::get().to(vm_controller::vnc))
|
||||
// Groups controller
|
||||
.route("/api/group/list", web::get().to(groups_controller::list))
|
||||
.route(
|
||||
"/api/group/{gid}/vm/info",
|
||||
web::get().to(groups_controller::vm_info),
|
||||
)
|
||||
.route(
|
||||
"/api/group/{gid}/vm/start",
|
||||
web::get().to(groups_controller::vm_start),
|
||||
)
|
||||
.route(
|
||||
"/api/group/{gid}/vm/shutdown",
|
||||
web::get().to(groups_controller::vm_shutdown),
|
||||
)
|
||||
.route(
|
||||
"/api/group/{gid}/vm/suspend",
|
||||
web::get().to(groups_controller::vm_suspend),
|
||||
)
|
||||
.route(
|
||||
"/api/group/{gid}/vm/resume",
|
||||
web::get().to(groups_controller::vm_resume),
|
||||
)
|
||||
.route(
|
||||
"/api/group/{gid}/vm/kill",
|
||||
web::get().to(groups_controller::vm_kill),
|
||||
)
|
||||
.route(
|
||||
"/api/group/{gid}/vm/reset",
|
||||
web::get().to(groups_controller::vm_reset),
|
||||
)
|
||||
.route(
|
||||
"/api/group/{gid}/vm/screenshot",
|
||||
web::get().to(groups_controller::vm_screenshot),
|
||||
)
|
||||
.route(
|
||||
"/api/group/{gid}/vm/state",
|
||||
web::get().to(groups_controller::vm_state),
|
||||
)
|
||||
// Network controller
|
||||
.route(
|
||||
"/api/network/create",
|
||||
@ -349,35 +277,6 @@ async fn main() -> std::io::Result<()> {
|
||||
"/api/nwfilter/{uid}",
|
||||
web::delete().to(nwfilter_controller::delete),
|
||||
)
|
||||
// Disk images library
|
||||
.route(
|
||||
"/api/disk_images/upload",
|
||||
web::post().to(disk_images_controller::upload),
|
||||
)
|
||||
.route(
|
||||
"/api/disk_images/list",
|
||||
web::get().to(disk_images_controller::get_list),
|
||||
)
|
||||
.route(
|
||||
"/api/disk_images/{filename}",
|
||||
web::get().to(disk_images_controller::download),
|
||||
)
|
||||
.route(
|
||||
"/api/disk_images/{filename}/convert",
|
||||
web::post().to(disk_images_controller::convert),
|
||||
)
|
||||
.route(
|
||||
"/api/disk_images/{filename}/rename",
|
||||
web::post().to(disk_images_controller::rename),
|
||||
)
|
||||
.route(
|
||||
"/api/disk_images/{filename}",
|
||||
web::delete().to(disk_images_controller::delete),
|
||||
)
|
||||
.route(
|
||||
"/api/vm/{uid}/disk/{diskid}/backup",
|
||||
web::post().to(disk_images_controller::backup_disk),
|
||||
)
|
||||
// API tokens controller
|
||||
.route(
|
||||
"/api/token/create",
|
||||
@ -400,11 +299,6 @@ async fn main() -> std::io::Result<()> {
|
||||
web::delete().to(api_tokens_controller::delete),
|
||||
)
|
||||
// Static assets
|
||||
.route(
|
||||
"/api/assets/{tail:.*}",
|
||||
web::get().to(static_controller::serve_assets::serve_api_assets),
|
||||
)
|
||||
// Static web frontend
|
||||
.route("/", web::get().to(static_controller::root_index))
|
||||
.route(
|
||||
"/{tail:.*}",
|
||||
|
@ -1,4 +1,4 @@
|
||||
use std::future::{Ready, ready};
|
||||
use std::future::{ready, Ready};
|
||||
use std::rc::Rc;
|
||||
|
||||
use crate::app_config::AppConfig;
|
||||
@ -8,8 +8,8 @@ use crate::extractors::auth_extractor::AuthExtractor;
|
||||
use actix_web::body::EitherBody;
|
||||
use actix_web::dev::Payload;
|
||||
use actix_web::{
|
||||
dev::{forward_ready, Service, ServiceRequest, ServiceResponse, Transform},
|
||||
Error, FromRequest, HttpResponse,
|
||||
dev::{Service, ServiceRequest, ServiceResponse, Transform, forward_ready},
|
||||
};
|
||||
use futures_util::future::LocalBoxFuture;
|
||||
|
||||
@ -68,10 +68,7 @@ where
|
||||
.unwrap();
|
||||
|
||||
if !AppConfig::get().is_allowed_ip(remote_ip.0) {
|
||||
log::error!(
|
||||
"An attempt to access VirtWeb from an unauthorized network has been intercepted! {:?}",
|
||||
remote_ip
|
||||
);
|
||||
log::error!("An attempt to access VirtWeb from an unauthorized network has been intercepted! {:?}", remote_ip);
|
||||
return Ok(req
|
||||
.into_response(
|
||||
HttpResponse::MethodNotAllowed()
|
||||
@ -89,8 +86,8 @@ where
|
||||
Ok(auth) => auth,
|
||||
Err(e) => {
|
||||
log::error!(
|
||||
"Failed to extract API authentication information from request! {e}"
|
||||
);
|
||||
"Failed to extract API authentication information from request! {e}"
|
||||
);
|
||||
return Ok(req
|
||||
.into_response(HttpResponse::PreconditionFailed().finish())
|
||||
.map_into_right_body());
|
||||
|
@ -49,9 +49,7 @@ pub async fn sub_main() -> anyhow::Result<()> {
let args = NatArgs::parse();

if !args.network_file().exists() {
log::warn!(
"Cannot do anything for the network, because the NAT configuration file does not exixsts!"
);
log::warn!("Cannot do anything for the network, because the NAT configuration file does not exixsts!");
return Ok(());
}

@ -186,9 +184,7 @@ fn toggle_port_forwarding(
false => "tcp",
};

log::info!(
"Forward (add={enable}) incoming {protocol} connections for {host_ip}:{host_port} to {guest_ip}:{guest_port} int {net_interface}"
);
log::info!("Forward (add={enable}) incoming {protocol} connections for {host_ip}:{host_port} to {guest_ip}:{guest_port} int {net_interface}");

// Rule 1
let cmd = Command::new(program)
@ -1,117 +0,0 @@
|
||||
use crate::app_config::AppConfig;
|
||||
use crate::constants;
|
||||
use std::process::Command;
|
||||
|
||||
/// Cloud init DS Mode
|
||||
#[derive(Copy, Clone, Debug, serde::Serialize, serde::Deserialize)]
|
||||
pub enum CloudInitDSMode {
|
||||
/// Networking is required
|
||||
Net,
|
||||
/// Does not require networking to be up before user-data actions are run
|
||||
Local,
|
||||
}
|
||||
|
||||
/// VM Cloud Init configuration
|
||||
///
|
||||
/// RedHat documentation: https://docs.redhat.com/fr/documentation/red_hat_enterprise_linux/9/html/configuring_and_managing_cloud-init_for_rhel_9/configuring-cloud-init_cloud-content
|
||||
/// cloud-localds source code: https://github.com/canonical/cloud-utils/blob/main/bin/cloud-localds
|
||||
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize, Default)]
|
||||
pub struct CloudInitConfig {
|
||||
pub attach_config: bool,
|
||||
/// Main user data
|
||||
pub user_data: String,
|
||||
/// Instance ID, set in metadata file
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub instance_id: Option<String>,
|
||||
/// Local hostname, set in metadata file
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub local_hostname: Option<String>,
|
||||
/// Data source mode
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub dsmode: Option<CloudInitDSMode>,
|
||||
/// Network configuration
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub network_configuration: Option<String>,
|
||||
}
|
||||
|
||||
impl CloudInitConfig {
|
||||
/// Check cloud init configuration
|
||||
pub fn check_error(&self) -> Option<String> {
|
||||
if !self.user_data.is_empty() {
|
||||
// Check YAML content
|
||||
if let Err(e) = serde_yml::from_str::<serde_json::Value>(&self.user_data) {
|
||||
return Some(format!(
|
||||
"user data is an invalid YAML file! Deserialization error: {e}"
|
||||
));
|
||||
}
|
||||
|
||||
// Check first line
|
||||
if !self.user_data.starts_with("#cloud-config\n") {
|
||||
return Some(
|
||||
"user data file MUST start with '#cloud-config' as first line!".to_string(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
/// Generate disk image for nocloud usage
|
||||
pub fn generate_nocloud_disk(&self) -> anyhow::Result<Vec<u8>> {
|
||||
let temp_path = tempfile::tempdir_in(&AppConfig::get().temp_dir)?;
|
||||
|
||||
let mut cmd = Command::new(constants::PROGRAM_CLOUD_LOCALDS);
|
||||
|
||||
// ISO destination path
|
||||
let temp_iso = temp_path.path().join("disk.iso");
|
||||
cmd.arg(&temp_iso);
|
||||
|
||||
// Process network configuration
|
||||
if let Some(net_conf) = &self.network_configuration {
|
||||
let net_conf_path = temp_path.path().join("network");
|
||||
std::fs::write(&net_conf_path, net_conf)?;
|
||||
cmd.arg("--network-config").arg(&net_conf_path);
|
||||
}
|
||||
|
||||
// Process user data
|
||||
let user_data_path = temp_path.path().join("user-data");
|
||||
std::fs::write(&user_data_path, &self.user_data)?;
|
||||
cmd.arg(user_data_path);
|
||||
|
||||
// Process metadata
|
||||
let mut metadatas = vec![];
|
||||
if let Some(inst_id) = &self.instance_id {
|
||||
metadatas.push(format!("instance-id: {}", inst_id));
|
||||
}
|
||||
if let Some(local_hostname) = &self.local_hostname {
|
||||
metadatas.push(format!("local-hostname: {}", local_hostname));
|
||||
}
|
||||
if let Some(dsmode) = &self.dsmode {
|
||||
metadatas.push(format!(
|
||||
"dsmode: {}",
|
||||
match dsmode {
|
||||
CloudInitDSMode::Net => "net",
|
||||
CloudInitDSMode::Local => "local",
|
||||
}
|
||||
));
|
||||
}
|
||||
let meta_data_path = temp_path.path().join("meta-data");
|
||||
std::fs::write(&meta_data_path, metadatas.join("\n"))?;
|
||||
cmd.arg(meta_data_path);
|
||||
|
||||
// Execute command
|
||||
let output = cmd.output()?;
|
||||
if !output.status.success() {
|
||||
anyhow::bail!(
|
||||
"{} exited with status {}!\nStdout: {}\nStderr: {}",
|
||||
constants::PROGRAM_CLOUD_LOCALDS,
|
||||
output.status,
|
||||
String::from_utf8_lossy(&output.stdout),
|
||||
String::from_utf8_lossy(&output.stderr)
|
||||
);
|
||||
}
|
||||
|
||||
// Read generated ISO file
|
||||
Ok(std::fs::read(temp_iso)?)
|
||||
}
|
||||
}
|
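For readers skimming this hunk, a minimal usage sketch of the removed CloudInitConfig API shown above (all literal values are invented for illustration): validate the user data first, then let the helper shell out to cloud-localds and return the NoCloud ISO bytes.

```rust
// Hypothetical usage sketch of the CloudInitConfig API shown above.
// All literal values are illustrative; field names mirror the struct in this diff.
fn build_seed_disk() -> anyhow::Result<Vec<u8>> {
    let config = CloudInitConfig {
        attach_config: true,
        user_data: "#cloud-config\nhostname: demo\n".to_string(),
        instance_id: Some("iid-demo-01".to_string()),
        local_hostname: Some("demo".to_string()),
        dsmode: Some(CloudInitDSMode::Local),
        network_configuration: None,
    };

    // check_error() returns Some(message) when the configuration is invalid
    if let Some(err) = config.check_error() {
        anyhow::bail!("Invalid cloud-init configuration: {err}");
    }

    // Shells out to cloud-localds and returns the generated NoCloud ISO bytes
    config.generate_nocloud_disk()
}
```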
133 virtweb_backend/src/utils/disks_utils.rs (new file)
@ -0,0 +1,133 @@
|
||||
use crate::app_config::AppConfig;
|
||||
use crate::constants;
|
||||
use crate::libvirt_lib_structures::XMLUuid;
|
||||
use crate::utils::files_utils;
|
||||
use lazy_regex::regex;
|
||||
use std::os::linux::fs::MetadataExt;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::process::Command;
|
||||
|
||||
#[derive(thiserror::Error, Debug)]
|
||||
enum DisksError {
|
||||
#[error("DiskParseError: {0}")]
|
||||
Parse(&'static str),
|
||||
#[error("DiskConfigError: {0}")]
|
||||
Config(&'static str),
|
||||
#[error("DiskCreateError")]
|
||||
Create,
|
||||
}
|
||||
|
||||
/// Type of disk allocation
|
||||
#[derive(Copy, Clone, Debug, serde::Serialize, serde::Deserialize)]
|
||||
pub enum DiskAllocType {
|
||||
Fixed,
|
||||
Sparse,
|
||||
}
|
||||
|
||||
#[derive(serde::Serialize, serde::Deserialize)]
|
||||
pub struct Disk {
|
||||
/// Disk size, in megabytes
|
||||
pub size: usize,
|
||||
/// Disk name
|
||||
pub name: String,
|
||||
pub alloc_type: DiskAllocType,
|
||||
/// Set this variable to true to delete the disk
|
||||
pub delete: bool,
|
||||
}
|
||||
|
||||
impl Disk {
|
||||
pub fn load_from_file(path: &str) -> anyhow::Result<Self> {
|
||||
let file = Path::new(path);
|
||||
|
||||
if !file.is_file() {
|
||||
return Err(DisksError::Parse("Path is not a file!").into());
|
||||
}
|
||||
|
||||
let metadata = file.metadata()?;
|
||||
|
||||
// Approximate estimation: st_blocks counts the 512-byte blocks actually allocated,
// so a file whose allocated blocks do not cover its nominal size is treated as sparse
|
||||
let is_sparse = metadata.len() / 512 >= metadata.st_blocks();
|
||||
|
||||
Ok(Self {
|
||||
size: metadata.len() as usize / (1000 * 1000),
|
||||
name: path.rsplit_once('/').unwrap().1.to_string(),
|
||||
alloc_type: match is_sparse {
|
||||
true => DiskAllocType::Sparse,
|
||||
false => DiskAllocType::Fixed,
|
||||
},
|
||||
delete: false,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn check_config(&self) -> anyhow::Result<()> {
|
||||
if constants::DISK_NAME_MIN_LEN > self.name.len()
|
||||
|| constants::DISK_NAME_MAX_LEN < self.name.len()
|
||||
{
|
||||
return Err(DisksError::Config("Disk name length is invalid").into());
|
||||
}
|
||||
|
||||
if !regex!("^[a-zA-Z0-9]+$").is_match(&self.name) {
|
||||
return Err(DisksError::Config("Disk name contains invalid characters!").into());
|
||||
}
|
||||
|
||||
if self.size < constants::DISK_SIZE_MIN || self.size > constants::DISK_SIZE_MAX {
|
||||
return Err(DisksError::Config("Disk size is invalid!").into());
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get disk path
|
||||
pub fn disk_path(&self, id: XMLUuid) -> PathBuf {
|
||||
let domain_dir = AppConfig::get().vm_storage_path(id);
|
||||
domain_dir.join(&self.name)
|
||||
}
|
||||
|
||||
/// Apply disk configuration
|
||||
pub fn apply_config(&self, id: XMLUuid) -> anyhow::Result<()> {
|
||||
self.check_config()?;
|
||||
|
||||
let file = self.disk_path(id);
|
||||
files_utils::create_directory_if_missing(file.parent().unwrap())?;
|
||||
|
||||
// Delete file if requested
|
||||
if self.delete {
|
||||
if !file.exists() {
|
||||
log::debug!("File {file:?} does not exists, so it was not deleted");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
log::info!("Deleting {file:?}");
|
||||
std::fs::remove_file(file)?;
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if file.exists() {
|
||||
log::debug!("File {file:?} does not exists, so it was not touched");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let mut cmd = Command::new("/usr/bin/dd");
|
||||
cmd.arg("if=/dev/zero")
|
||||
.arg(format!("of={}", file.to_string_lossy()))
|
||||
.arg("bs=1M");
|
||||
|
||||
match self.alloc_type {
|
||||
DiskAllocType::Fixed => cmd.arg(format!("count={}", self.size)),
|
||||
DiskAllocType::Sparse => cmd.arg(format!("seek={}", self.size)).arg("count=0"),
|
||||
};
|
||||
|
||||
let res = cmd.output()?;
|
||||
|
||||
if !res.status.success() {
|
||||
log::error!(
|
||||
"Failed to create disk! stderr={} stdout={}",
|
||||
String::from_utf8_lossy(&res.stderr),
|
||||
String::from_utf8_lossy(&res.stdout)
|
||||
);
|
||||
return Err(DisksError::Create.into());
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
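As an orientation aid, a minimal sketch of how the new Disk helper above is meant to be driven; the disk name and size are invented, and vm_id is assumed to be the target VM's XMLUuid.

```rust
// Hypothetical sketch: provision a 10 GB sparse disk for a VM using the
// Disk struct defined above. `vm_id` is assumed to be the VM's XMLUuid.
fn create_data_disk(vm_id: XMLUuid) -> anyhow::Result<()> {
    let disk = Disk {
        size: 10_000,                      // size is expressed in megabytes
        name: "data0".to_string(),         // must match ^[a-zA-Z0-9]+$
        alloc_type: DiskAllocType::Sparse, // written with dd seek=<size> count=0
        delete: false,
    };

    // apply_config() re-checks the configuration, then creates the file
    // under the VM storage directory if it does not exist yet.
    disk.apply_config(vm_id)
}
```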
@ -1,10 +0,0 @@
|
||||
use std::path::Path;
|
||||
|
||||
/// Check the existence of a required program
|
||||
pub fn check_program(name: &str, description: &str) {
|
||||
let path = Path::new(name);
|
||||
|
||||
if !path.exists() {
|
||||
panic!("{name} does not exist! {description}");
|
||||
}
|
||||
}
|
@ -1,468 +0,0 @@
|
||||
use crate::app_config::AppConfig;
|
||||
use crate::constants;
|
||||
use crate::utils::file_size_utils::FileSize;
|
||||
use std::fs::File;
|
||||
use std::os::linux::fs::MetadataExt;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::process::Command;
|
||||
use std::time::UNIX_EPOCH;
|
||||
|
||||
#[derive(thiserror::Error, Debug)]
|
||||
enum DisksError {
|
||||
#[error("DiskParseError: {0}")]
|
||||
Parse(&'static str),
|
||||
#[error("DiskCreateError")]
|
||||
Create,
|
||||
#[error("DiskConvertError: {0}")]
|
||||
Convert(String),
|
||||
}
|
||||
|
||||
#[derive(Debug, serde::Serialize, serde::Deserialize, Copy, Clone, PartialEq, Eq)]
|
||||
#[serde(tag = "format")]
|
||||
pub enum DiskFileFormat {
|
||||
Raw {
|
||||
#[serde(default)]
|
||||
is_sparse: bool,
|
||||
},
|
||||
QCow2 {
|
||||
#[serde(default)]
|
||||
virtual_size: FileSize,
|
||||
},
|
||||
GzCompressedRaw,
|
||||
GzCompressedQCow2,
|
||||
XzCompressedRaw,
|
||||
XzCompressedQCow2,
|
||||
}
|
||||
|
||||
impl DiskFileFormat {
|
||||
pub fn ext(&self) -> &'static [&'static str] {
|
||||
match self {
|
||||
DiskFileFormat::Raw { .. } => &["raw", ""],
|
||||
DiskFileFormat::QCow2 { .. } => &["qcow2"],
|
||||
DiskFileFormat::GzCompressedRaw => &["raw.gz"],
|
||||
DiskFileFormat::GzCompressedQCow2 => &["qcow2.gz"],
|
||||
DiskFileFormat::XzCompressedRaw => &["raw.xz"],
|
||||
DiskFileFormat::XzCompressedQCow2 => &["qcow2.xz"],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Disk file information
|
||||
#[derive(serde::Serialize)]
|
||||
pub struct DiskFileInfo {
|
||||
pub file_path: PathBuf,
|
||||
pub file_size: FileSize,
|
||||
#[serde(flatten)]
|
||||
pub format: DiskFileFormat,
|
||||
pub file_name: String,
|
||||
pub name: String,
|
||||
pub created: u64,
|
||||
}
|
||||
|
||||
impl DiskFileInfo {
|
||||
/// Get disk image file information
|
||||
pub fn load_file(file: &Path) -> anyhow::Result<Self> {
|
||||
if !file.is_file() {
|
||||
return Err(DisksError::Parse("Path is not a file!").into());
|
||||
}
|
||||
|
||||
// Get file metadata
|
||||
let metadata = file.metadata()?;
|
||||
let mut name = file
|
||||
.file_stem()
|
||||
.and_then(|s| s.to_str())
|
||||
.unwrap_or("disk")
|
||||
.to_string();
|
||||
let ext = file.extension().and_then(|s| s.to_str()).unwrap_or("raw");
|
||||
|
||||
// Determine file format
|
||||
let format = match ext {
|
||||
"qcow2" => DiskFileFormat::QCow2 {
|
||||
virtual_size: qcow_virt_size(file)?,
|
||||
},
|
||||
"raw" => DiskFileFormat::Raw {
|
||||
is_sparse: metadata.len() / 512 >= metadata.st_blocks(),
|
||||
},
|
||||
"gz" if name.ends_with(".qcow2") => {
|
||||
name = name.strip_suffix(".qcow2").unwrap_or(&name).to_string();
|
||||
DiskFileFormat::GzCompressedQCow2
|
||||
}
|
||||
"gz" => DiskFileFormat::GzCompressedRaw,
|
||||
"xz" if name.ends_with(".qcow2") => {
|
||||
name = name.strip_suffix(".qcow2").unwrap_or(&name).to_string();
|
||||
DiskFileFormat::XzCompressedQCow2
|
||||
}
|
||||
"xz" => DiskFileFormat::XzCompressedRaw,
|
||||
_ => anyhow::bail!("Unsupported disk extension: {ext}!"),
|
||||
};
|
||||
|
||||
Ok(Self {
|
||||
file_path: file.to_path_buf(),
|
||||
name,
|
||||
file_size: FileSize::from_bytes(metadata.len() as usize),
|
||||
format,
|
||||
file_name: file
|
||||
.file_name()
|
||||
.and_then(|s| s.to_str())
|
||||
.unwrap_or("")
|
||||
.to_string(),
|
||||
created: metadata
|
||||
.created()?
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_secs(),
|
||||
})
|
||||
}
|
||||
|
||||
/// Create a new empty disk
|
||||
pub fn create(file: &Path, format: DiskFileFormat, size: FileSize) -> anyhow::Result<()> {
|
||||
// Prepare command to create file
|
||||
let res = match format {
|
||||
DiskFileFormat::Raw { is_sparse } => {
|
||||
let mut cmd = Command::new("/usr/bin/dd");
|
||||
cmd.arg("if=/dev/zero")
|
||||
.arg(format!("of={}", file.to_string_lossy()))
|
||||
.arg("bs=1M");
|
||||
|
||||
match is_sparse {
|
||||
false => cmd.arg(format!("count={}", size.as_mb())),
|
||||
true => cmd.arg(format!("seek={}", size.as_mb())).arg("count=0"),
|
||||
};
|
||||
|
||||
cmd.output()?
|
||||
}
|
||||
|
||||
DiskFileFormat::QCow2 { virtual_size } => {
|
||||
let mut cmd = Command::new(constants::PROGRAM_QEMU_IMAGE);
|
||||
cmd.arg("create")
|
||||
.arg("-f")
|
||||
.arg("qcow2")
|
||||
.arg(file)
|
||||
.arg(format!("{}M", virtual_size.as_mb()));
|
||||
|
||||
cmd.output()?
|
||||
}
|
||||
_ => anyhow::bail!("Cannot create disk file image of this format: {format:?}!"),
|
||||
};
|
||||
|
||||
// Execute Linux command
|
||||
if !res.status.success() {
|
||||
log::error!(
|
||||
"Failed to create disk! stderr={} stdout={}",
|
||||
String::from_utf8_lossy(&res.stderr),
|
||||
String::from_utf8_lossy(&res.stdout)
|
||||
);
|
||||
return Err(DisksError::Create.into());
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Copy / convert file disk image into a new destination with optionally a new file format
|
||||
pub fn convert(&self, dest_file: &Path, dest_format: DiskFileFormat) -> anyhow::Result<()> {
|
||||
// Create a temporary directory to perform the operation
|
||||
let temp_dir = tempfile::tempdir_in(&AppConfig::get().temp_dir)?;
|
||||
let temp_file = temp_dir
|
||||
.path()
|
||||
.join(format!("temp_file.{}", dest_format.ext()[0]));
|
||||
|
||||
// Prepare the conversion
|
||||
let mut cmd = match (self.format, dest_format) {
|
||||
// Decompress QCow2 (GZIP)
|
||||
(DiskFileFormat::GzCompressedQCow2, DiskFileFormat::QCow2 { .. }) => {
|
||||
let mut cmd = Command::new(constants::PROGRAM_GZIP);
|
||||
cmd.arg("--keep")
|
||||
.arg("--decompress")
|
||||
.arg("--to-stdout")
|
||||
.arg(&self.file_path)
|
||||
.stdout(File::create(&temp_file)?);
|
||||
cmd
|
||||
}
|
||||
|
||||
// Decompress QCow2 (XZ)
|
||||
(DiskFileFormat::XzCompressedQCow2, DiskFileFormat::QCow2 { .. }) => {
|
||||
let mut cmd = Command::new(constants::PROGRAM_XZ);
|
||||
cmd.arg("--stdout")
|
||||
.arg("--keep")
|
||||
.arg("--decompress")
|
||||
.arg(&self.file_path)
|
||||
.stdout(File::create(&temp_file)?);
|
||||
cmd
|
||||
}
|
||||
|
||||
// Compress QCow2 (Gzip)
|
||||
(DiskFileFormat::QCow2 { .. }, DiskFileFormat::GzCompressedQCow2) => {
|
||||
let mut cmd = Command::new(constants::PROGRAM_GZIP);
|
||||
cmd.arg("--keep")
|
||||
.arg("--to-stdout")
|
||||
.arg(&self.file_path)
|
||||
.stdout(File::create(&temp_file)?);
|
||||
cmd
|
||||
}
|
||||
|
||||
// Compress QCow2 (Xz)
|
||||
(DiskFileFormat::QCow2 { .. }, DiskFileFormat::XzCompressedQCow2) => {
|
||||
let mut cmd = Command::new(constants::PROGRAM_XZ);
|
||||
cmd.arg("--keep")
|
||||
.arg("--to-stdout")
|
||||
.arg(&self.file_path)
|
||||
.stdout(File::create(&temp_file)?);
|
||||
cmd
|
||||
}
|
||||
|
||||
// Convert QCow2 to Raw file
|
||||
(DiskFileFormat::QCow2 { .. }, DiskFileFormat::Raw { is_sparse }) => {
|
||||
let mut cmd = Command::new(constants::PROGRAM_QEMU_IMAGE);
|
||||
cmd.arg("convert")
|
||||
.arg("-f")
|
||||
.arg("qcow2")
|
||||
.arg("-O")
|
||||
.arg("raw")
|
||||
.arg(&self.file_path)
|
||||
.arg(&temp_file);
|
||||
|
||||
if !is_sparse {
|
||||
cmd.args(["-S", "0"]);
|
||||
}
|
||||
|
||||
cmd
|
||||
}
|
||||
|
||||
// Clone a QCow file, using qemu-image instead of cp might improve "sparsification" of
|
||||
// file
|
||||
(DiskFileFormat::QCow2 { .. }, DiskFileFormat::QCow2 { .. }) => {
|
||||
let mut cmd = Command::new(constants::PROGRAM_QEMU_IMAGE);
|
||||
cmd.arg("convert")
|
||||
.arg("-f")
|
||||
.arg("qcow2")
|
||||
.arg("-O")
|
||||
.arg("qcow2")
|
||||
.arg(&self.file_path)
|
||||
.arg(&temp_file);
|
||||
cmd
|
||||
}
|
||||
|
||||
// Convert Raw to QCow2 file
|
||||
(DiskFileFormat::Raw { .. }, DiskFileFormat::QCow2 { .. }) => {
|
||||
let mut cmd = Command::new(constants::PROGRAM_QEMU_IMAGE);
|
||||
cmd.arg("convert")
|
||||
.arg("-f")
|
||||
.arg("raw")
|
||||
.arg("-O")
|
||||
.arg("qcow2")
|
||||
.arg(&self.file_path)
|
||||
.arg(&temp_file);
|
||||
|
||||
cmd
|
||||
}
|
||||
|
||||
// Render raw file non sparse
|
||||
(DiskFileFormat::Raw { is_sparse: true }, DiskFileFormat::Raw { is_sparse: false }) => {
|
||||
let mut cmd = Command::new(constants::PROGRAM_COPY);
|
||||
cmd.arg("--sparse=never")
|
||||
.arg(&self.file_path)
|
||||
.arg(&temp_file);
|
||||
cmd
|
||||
}
|
||||
|
||||
// Render raw file sparse
|
||||
(DiskFileFormat::Raw { is_sparse: false }, DiskFileFormat::Raw { is_sparse: true }) => {
|
||||
let mut cmd = Command::new(constants::PROGRAM_DD);
|
||||
cmd.arg("conv=sparse")
|
||||
.arg(format!("if={}", self.file_path.display()))
|
||||
.arg(format!("of={}", temp_file.display()));
|
||||
cmd
|
||||
}
|
||||
|
||||
// Compress Raw (Gz)
|
||||
(DiskFileFormat::Raw { .. }, DiskFileFormat::GzCompressedRaw) => {
|
||||
let mut cmd = Command::new(constants::PROGRAM_GZIP);
|
||||
cmd.arg("--keep")
|
||||
.arg("--to-stdout")
|
||||
.arg(&self.file_path)
|
||||
.stdout(File::create(&temp_file)?);
|
||||
cmd
|
||||
}
|
||||
|
||||
// Compress Raw (Xz)
|
||||
(DiskFileFormat::Raw { .. }, DiskFileFormat::XzCompressedRaw) => {
|
||||
let mut cmd = Command::new(constants::PROGRAM_XZ);
|
||||
cmd.arg("--keep")
|
||||
.arg("--to-stdout")
|
||||
.arg(&self.file_path)
|
||||
.stdout(File::create(&temp_file)?);
|
||||
cmd
|
||||
}
|
||||
|
||||
// Decompress Raw (Gz) to not sparse file
|
||||
(DiskFileFormat::GzCompressedRaw, DiskFileFormat::Raw { is_sparse: false }) => {
|
||||
let mut cmd = Command::new(constants::PROGRAM_GZIP);
|
||||
cmd.arg("--keep")
|
||||
.arg("--decompress")
|
||||
.arg("--to-stdout")
|
||||
.arg(&self.file_path)
|
||||
.stdout(File::create(&temp_file)?);
|
||||
cmd
|
||||
}
|
||||
// Decompress Raw (Xz) to not sparse file
|
||||
(DiskFileFormat::XzCompressedRaw, DiskFileFormat::Raw { is_sparse: false }) => {
|
||||
let mut cmd = Command::new(constants::PROGRAM_XZ);
|
||||
cmd.arg("--keep")
|
||||
.arg("--decompress")
|
||||
.arg("--to-stdout")
|
||||
.arg(&self.file_path)
|
||||
.stdout(File::create(&temp_file)?);
|
||||
cmd
|
||||
}
|
||||
|
||||
// Decompress Raw (Gz) to sparse file
|
||||
// https://benou.fr/www/ben/decompressing-sparse-files.html
|
||||
(DiskFileFormat::GzCompressedRaw, DiskFileFormat::Raw { is_sparse: true }) => {
|
||||
let mut cmd = Command::new(constants::PROGRAM_BASH);
|
||||
cmd.arg("-c").arg(format!(
|
||||
"{} --decompress --to-stdout {} | {} conv=sparse of={}",
|
||||
constants::PROGRAM_GZIP,
|
||||
self.file_path.display(),
|
||||
constants::PROGRAM_DD,
|
||||
temp_file.display()
|
||||
));
|
||||
cmd
|
||||
}
|
||||
|
||||
// Decompress Raw (XZ) to sparse file
|
||||
// https://benou.fr/www/ben/decompressing-sparse-files.html
|
||||
(DiskFileFormat::XzCompressedRaw, DiskFileFormat::Raw { is_sparse: true }) => {
|
||||
let mut cmd = Command::new(constants::PROGRAM_BASH);
|
||||
cmd.arg("-c").arg(format!(
|
||||
"{} --decompress --to-stdout {} | {} conv=sparse of={}",
|
||||
constants::PROGRAM_XZ,
|
||||
self.file_path.display(),
|
||||
constants::PROGRAM_DD,
|
||||
temp_file.display()
|
||||
));
|
||||
cmd
|
||||
}
|
||||
|
||||
// Dumb copy of file
|
||||
(a, b) if a == b => {
|
||||
let mut cmd = Command::new(constants::PROGRAM_COPY);
|
||||
cmd.arg("--sparse=auto")
|
||||
.arg(&self.file_path)
|
||||
.arg(&temp_file);
|
||||
cmd
|
||||
}
|
||||
|
||||
// By default, conversion is unsupported
|
||||
(src, dest) => {
|
||||
return Err(DisksError::Convert(format!(
|
||||
"Conversion from {src:?} to {dest:?} is not supported!"
|
||||
))
|
||||
.into());
|
||||
}
|
||||
};
|
||||
|
||||
// Execute the conversion
|
||||
let command_s = format!(
|
||||
"{} {}",
|
||||
cmd.get_program().display(),
|
||||
cmd.get_args()
|
||||
.map(|a| format!("'{}'", a.display()))
|
||||
.collect::<Vec<String>>()
|
||||
.join(" ")
|
||||
);
|
||||
let cmd_output = cmd.output()?;
|
||||
if !cmd_output.status.success() {
|
||||
return Err(DisksError::Convert(format!(
|
||||
"Command failed:\n{command_s}\nStatus: {}\nstdout: {}\nstderr: {}",
|
||||
cmd_output.status,
|
||||
String::from_utf8_lossy(&cmd_output.stdout),
|
||||
String::from_utf8_lossy(&cmd_output.stderr)
|
||||
))
|
||||
.into());
|
||||
}
|
||||
|
||||
// Check the file was created
|
||||
if !temp_file.is_file() {
|
||||
return Err(DisksError::Convert(
|
||||
"Temporary was not created after execution of command!".to_string(),
|
||||
)
|
||||
.into());
|
||||
}
|
||||
|
||||
// Move the file to its final location
|
||||
std::fs::rename(temp_file, dest_file)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get disk virtual size, if available
|
||||
pub fn virtual_size(&self) -> Option<FileSize> {
|
||||
match self.format {
|
||||
DiskFileFormat::Raw { .. } => Some(self.file_size),
|
||||
DiskFileFormat::QCow2 { virtual_size } => Some(virtual_size),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Resize disk
|
||||
pub fn resize(&self, new_size: FileSize) -> anyhow::Result<()> {
|
||||
if new_size <= self.virtual_size().unwrap_or(new_size) {
|
||||
anyhow::bail!("Shrinking disk image file is not supported!");
|
||||
}
|
||||
|
||||
let mut cmd = Command::new(constants::PROGRAM_QEMU_IMAGE);
|
||||
cmd.arg("resize")
|
||||
.arg("-f")
|
||||
.arg(match self.format {
|
||||
DiskFileFormat::QCow2 { .. } => "qcow2",
|
||||
DiskFileFormat::Raw { .. } => "raw",
|
||||
f => anyhow::bail!("Unsupported disk format for resize: {f:?}"),
|
||||
})
|
||||
.arg(&self.file_path)
|
||||
.arg(new_size.as_bytes().to_string());
|
||||
|
||||
let output = cmd.output()?;
|
||||
if !output.status.success() {
|
||||
anyhow::bail!(
|
||||
"{} info failed, status: {}, stderr: {}",
|
||||
constants::PROGRAM_QEMU_IMAGE,
|
||||
output.status,
|
||||
String::from_utf8_lossy(&output.stderr)
|
||||
);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(serde::Deserialize)]
|
||||
struct QCowInfoOutput {
|
||||
#[serde(rename = "virtual-size")]
|
||||
virtual_size: usize,
|
||||
}
|
||||
|
||||
/// Get QCow2 virtual size
|
||||
fn qcow_virt_size(path: &Path) -> anyhow::Result<FileSize> {
|
||||
// Run qemu-img
|
||||
let mut cmd = Command::new(constants::PROGRAM_QEMU_IMAGE);
|
||||
cmd.args([
|
||||
"info",
|
||||
path.to_str().unwrap_or(""),
|
||||
"--output",
|
||||
"json",
|
||||
"--force-share",
|
||||
]);
|
||||
let output = cmd.output()?;
|
||||
if !output.status.success() {
|
||||
anyhow::bail!(
|
||||
"{} info failed, status: {}, stderr: {}",
|
||||
constants::PROGRAM_QEMU_IMAGE,
|
||||
output.status,
|
||||
String::from_utf8_lossy(&output.stderr)
|
||||
);
|
||||
}
|
||||
let res_json = String::from_utf8(output.stdout)?;
|
||||
|
||||
// Decode JSON
|
||||
let decoded: QCowInfoOutput = serde_json::from_str(&res_json)?;
|
||||
Ok(FileSize::from_bytes(decoded.virtual_size))
|
||||
}
|
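As a reading aid for the conversion matrix above, a hypothetical sketch: load an existing QCow2 image and export a gzip-compressed copy of it. Paths are illustrative; unsupported format pairs fall through to DisksError::Convert.

```rust
use std::path::Path;

// Hypothetical sketch of the removed DiskFileInfo API. The (QCow2 ->
// GzCompressedQCow2) pair is one of the supported arms of the conversion
// matrix above.
fn export_compressed(src: &Path, dest: &Path) -> anyhow::Result<()> {
    let info = DiskFileInfo::load_file(src)?;
    info.convert(dest, DiskFileFormat::GzCompressedQCow2)
}
```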
@ -1,91 +0,0 @@
|
||||
use std::ops::Mul;
|
||||
|
||||
#[derive(thiserror::Error, Debug)]
|
||||
enum FilesSizeUtilsError {
|
||||
#[error("UnitConvertError: {0}")]
|
||||
UnitConvert(String),
|
||||
}
|
||||
|
||||
/// Holds a data size, convertible in any form
|
||||
#[derive(
|
||||
serde::Serialize,
|
||||
serde::Deserialize,
|
||||
Copy,
|
||||
Clone,
|
||||
Debug,
|
||||
Eq,
|
||||
PartialEq,
|
||||
PartialOrd,
|
||||
Ord,
|
||||
Default,
|
||||
)]
|
||||
pub struct FileSize(usize);
|
||||
|
||||
impl FileSize {
|
||||
pub const fn from_bytes(size: usize) -> Self {
|
||||
Self(size)
|
||||
}
|
||||
|
||||
pub const fn from_mb(mb: usize) -> Self {
|
||||
Self(mb * 1000 * 1000)
|
||||
}
|
||||
|
||||
pub const fn from_gb(gb: usize) -> Self {
|
||||
Self(gb * 1000 * 1000 * 1000)
|
||||
}
|
||||
|
||||
/// Convert size unit to MB
|
||||
pub fn from_size_unit(unit: &str, value: usize) -> anyhow::Result<Self> {
|
||||
let fact = match unit {
|
||||
"bytes" | "b" => 1f64,
|
||||
"KB" => 1000f64,
|
||||
"MB" => 1000f64 * 1000f64,
|
||||
"GB" => 1000f64 * 1000f64 * 1000f64,
|
||||
"TB" => 1000f64 * 1000f64 * 1000f64 * 1000f64,
|
||||
|
||||
"k" | "KiB" => 1024f64,
|
||||
"M" | "MiB" => 1024f64 * 1024f64,
|
||||
"G" | "GiB" => 1024f64 * 1024f64 * 1024f64,
|
||||
"T" | "TiB" => 1024f64 * 1024f64 * 1024f64 * 1024f64,
|
||||
|
||||
_ => {
|
||||
return Err(
|
||||
FilesSizeUtilsError::UnitConvert(format!("Unknown size unit: {unit}")).into(),
|
||||
);
|
||||
}
|
||||
};
|
||||
|
||||
Ok(Self((value as f64).mul(fact).ceil() as usize))
|
||||
}
|
||||
|
||||
/// Get file size as bytes
|
||||
pub fn as_bytes(&self) -> usize {
|
||||
self.0
|
||||
}
|
||||
|
||||
/// Get file size as megabytes
|
||||
pub fn as_mb(&self) -> usize {
|
||||
self.0 / (1000 * 1000)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::utils::file_size_utils::FileSize;
|
||||
|
||||
#[test]
|
||||
fn convert_units_mb() {
|
||||
assert_eq!(FileSize::from_size_unit("MB", 1).unwrap().as_mb(), 1);
|
||||
assert_eq!(FileSize::from_size_unit("MB", 1000).unwrap().as_mb(), 1000);
|
||||
assert_eq!(
|
||||
FileSize::from_size_unit("GB", 1000).unwrap().as_mb(),
|
||||
1000 * 1000
|
||||
);
|
||||
assert_eq!(FileSize::from_size_unit("GB", 1).unwrap().as_mb(), 1000);
|
||||
assert_eq!(FileSize::from_size_unit("GiB", 3).unwrap().as_mb(), 3221);
|
||||
assert_eq!(
|
||||
FileSize::from_size_unit("KiB", 488281).unwrap().as_mb(),
|
||||
499
|
||||
);
|
||||
}
|
||||
}
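A short, hypothetical illustration of the FileSize semantics above: sizes are stored as bytes, the MB/GB helpers are decimal (powers of 1000), and KiB/MiB/GiB inputs are binary (powers of 1024).

```rust
// Hypothetical illustration of the FileSize semantics shown above.
fn filesize_examples() -> anyhow::Result<()> {
    assert_eq!(FileSize::from_gb(1).as_bytes(), 1_000_000_000); // decimal GB
    assert_eq!(FileSize::from_mb(512).as_mb(), 512);

    // "GiB" is parsed as a binary unit: 3 * 1024^3 bytes is about 3221 decimal MB
    assert_eq!(FileSize::from_size_unit("GiB", 3)?.as_mb(), 3221);
    Ok(())
}
```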
|
@ -1,6 +1,13 @@
|
||||
use std::ops::{Div, Mul};
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
use std::path::Path;
|
||||
|
||||
#[derive(thiserror::Error, Debug)]
|
||||
enum FilesUtilsError {
|
||||
#[error("UnitConvertError: {0}")]
|
||||
UnitConvert(String),
|
||||
}
|
||||
|
||||
const INVALID_CHARS: [&str; 19] = [
|
||||
"@", "\\", "/", ":", ",", "<", ">", "%", "'", "\"", "?", "{", "}", "$", "*", "|", ";", "=",
|
||||
"\t",
|
||||
@ -28,9 +35,31 @@ pub fn set_file_permission<P: AsRef<Path>>(path: P, mode: u32) -> anyhow::Result
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Convert size unit to MB
|
||||
pub fn convert_size_unit_to_mb(unit: &str, value: usize) -> anyhow::Result<usize> {
|
||||
let fact = match unit {
|
||||
"bytes" | "b" => 1f64,
|
||||
"KB" => 1000f64,
|
||||
"MB" => 1000f64 * 1000f64,
|
||||
"GB" => 1000f64 * 1000f64 * 1000f64,
|
||||
"TB" => 1000f64 * 1000f64 * 1000f64 * 1000f64,
|
||||
|
||||
"k" | "KiB" => 1024f64,
|
||||
"M" | "MiB" => 1024f64 * 1024f64,
|
||||
"G" | "GiB" => 1024f64 * 1024f64 * 1024f64,
|
||||
"T" | "TiB" => 1024f64 * 1024f64 * 1024f64 * 1024f64,
|
||||
|
||||
_ => {
|
||||
return Err(FilesUtilsError::UnitConvert(format!("Unknown size unit: {unit}")).into());
|
||||
}
|
||||
};
|
||||
|
||||
Ok((value as f64).mul(fact.div((1000 * 1000) as f64)).ceil() as usize)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use crate::utils::files_utils::check_file_name;
|
||||
use crate::utils::files_utils::{check_file_name, convert_size_unit_to_mb};
|
||||
|
||||
#[test]
|
||||
fn empty_file_name() {
|
||||
@ -56,4 +85,14 @@ mod test {
|
||||
fn valid_file_name() {
|
||||
assert!(check_file_name("test.iso"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn convert_units_mb() {
|
||||
assert_eq!(convert_size_unit_to_mb("MB", 1).unwrap(), 1);
|
||||
assert_eq!(convert_size_unit_to_mb("MB", 1000).unwrap(), 1000);
|
||||
assert_eq!(convert_size_unit_to_mb("GB", 1000).unwrap(), 1000 * 1000);
|
||||
assert_eq!(convert_size_unit_to_mb("GB", 1).unwrap(), 1000);
|
||||
assert_eq!(convert_size_unit_to_mb("GiB", 3).unwrap(), 3222);
|
||||
assert_eq!(convert_size_unit_to_mb("KiB", 488281).unwrap(), 500);
|
||||
}
|
||||
}
|
||||
|
@ -1,10 +1,6 @@
|
||||
pub mod cloud_init_utils;
|
||||
pub mod exec_utils;
|
||||
pub mod file_disks_utils;
|
||||
pub mod file_size_utils;
|
||||
pub mod disks_utils;
|
||||
pub mod files_utils;
|
||||
pub mod net_utils;
|
||||
pub mod rand_utils;
|
||||
pub mod time_utils;
|
||||
pub mod url_utils;
|
||||
pub mod vm_file_disks_utils;
|
||||
|
@ -1,8 +1,6 @@
|
||||
use crate::constants;
|
||||
use nix::sys::socket::{AddressFamily, SockaddrLike};
|
||||
use std::collections::HashMap;
|
||||
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
|
||||
use std::process::Command;
|
||||
use std::str::FromStr;
|
||||
use sysinfo::Networks;
|
||||
|
||||
@ -59,7 +57,7 @@ pub fn is_net_interface_name_valid<D: AsRef<str>>(int: D) -> bool {
|
||||
/// Get the list of available network interfaces
|
||||
pub fn net_list() -> Vec<String> {
|
||||
let mut networks = Networks::new();
|
||||
networks.refresh(true);
|
||||
networks.refresh_list();
|
||||
|
||||
networks
|
||||
.list()
|
||||
@ -70,7 +68,7 @@ pub fn net_list() -> Vec<String> {
|
||||
|
||||
/// Get the list of available network interfaces associated with their IP address
|
||||
pub fn net_list_and_ips() -> anyhow::Result<HashMap<String, Vec<IpAddr>>> {
|
||||
let addrs = nix::ifaddrs::getifaddrs()?;
|
||||
let addrs = nix::ifaddrs::getifaddrs().unwrap();
|
||||
|
||||
let mut res = HashMap::new();
|
||||
|
||||
@ -138,31 +136,6 @@ pub fn net_list_and_ips() -> anyhow::Result<HashMap<String, Vec<IpAddr>>> {
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
#[derive(serde::Deserialize)]
|
||||
struct IPBridgeInfo {
|
||||
ifname: String,
|
||||
}
|
||||
|
||||
/// Get the list of bridge interfaces
|
||||
pub fn bridges_list() -> anyhow::Result<Vec<String>> {
|
||||
let mut cmd = Command::new(constants::PROGRAM_IP);
|
||||
cmd.args(["-json", "link", "show", "type", "bridge"]);
|
||||
let output = cmd.output()?;
|
||||
if !output.status.success() {
|
||||
anyhow::bail!(
|
||||
"{} failed, status: {}, stderr: {}",
|
||||
constants::PROGRAM_IP,
|
||||
output.status,
|
||||
String::from_utf8_lossy(&output.stderr)
|
||||
);
|
||||
}
|
||||
|
||||
// Parse JSON result
|
||||
let res: Vec<IPBridgeInfo> = serde_json::from_str(&String::from_utf8_lossy(&output.stdout))?;
|
||||
|
||||
Ok(res.iter().map(|ip| ip.ifname.clone()).collect())
|
||||
}
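bridges_list relies on the JSON form of the iproute2 output; a hypothetical, simplified sample would be deserialized like this (only the ifname field is mapped onto IPBridgeInfo).

```rust
// Hypothetical sketch: `ip -json link show type bridge` prints an array of
// objects; only the "ifname" field is kept by the IPBridgeInfo struct above.
fn parse_bridge_sample() -> anyhow::Result<Vec<String>> {
    let sample = r#"[{"ifname":"br0"},{"ifname":"virbr0"}]"#; // illustrative sample
    let parsed: Vec<IPBridgeInfo> = serde_json::from_str(sample)?;
    Ok(parsed.iter().map(|b| b.ifname.clone()).collect())
}
```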
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::utils::net_utils::{
|
||||
|
@ -1,6 +1,12 @@
|
||||
use rand::distr::{Alphanumeric, SampleString};
|
||||
use rand::distributions::Alphanumeric;
|
||||
use rand::Rng;
|
||||
|
||||
/// Generate a random string
|
||||
pub fn rand_str(len: usize) -> String {
|
||||
Alphanumeric.sample_string(&mut rand::rng(), len)
|
||||
let s: String = rand::thread_rng()
|
||||
.sample_iter(&Alphanumeric)
|
||||
.take(len)
|
||||
.map(char::from)
|
||||
.collect();
|
||||
s
|
||||
}
|
||||
|
@ -1,4 +1,3 @@
|
||||
use chrono::Datelike;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
/// Get the current time since epoch
|
||||
@ -14,15 +13,3 @@ pub fn time() -> u64 {
|
||||
.unwrap()
|
||||
.as_secs()
|
||||
}
|
||||
|
||||
/// Format given UNIX time in a simple format
|
||||
pub fn format_date(time: i64) -> anyhow::Result<String> {
|
||||
let date = chrono::DateTime::from_timestamp(time, 0).ok_or(anyhow::anyhow!("invalid date"))?;
|
||||
|
||||
Ok(format!(
|
||||
"{:0>2}/{:0>2}/{}",
|
||||
date.day(),
|
||||
date.month(),
|
||||
date.year()
|
||||
))
|
||||
}
|
||||
|
@ -1,190 +0,0 @@
|
||||
use crate::app_config::AppConfig;
|
||||
use crate::constants;
|
||||
use crate::libvirt_lib_structures::XMLUuid;
|
||||
use crate::utils::file_disks_utils::{DiskFileFormat, DiskFileInfo};
|
||||
use crate::utils::file_size_utils::FileSize;
|
||||
use crate::utils::files_utils;
|
||||
use lazy_regex::regex;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
#[derive(thiserror::Error, Debug)]
|
||||
enum VMDisksError {
|
||||
#[error("DiskConfigError: {0}")]
|
||||
Config(&'static str),
|
||||
}
|
||||
|
||||
#[derive(serde::Serialize, serde::Deserialize)]
|
||||
pub enum VMDiskBus {
|
||||
Virtio,
|
||||
SATA,
|
||||
}
|
||||
|
||||
/// Disk allocation type
|
||||
#[derive(serde::Serialize, serde::Deserialize)]
|
||||
#[serde(tag = "format")]
|
||||
pub enum VMDiskFormat {
|
||||
Raw {
|
||||
/// Is raw file a sparse file?
|
||||
is_sparse: bool,
|
||||
},
|
||||
QCow2,
|
||||
}
|
||||
|
||||
#[derive(serde::Serialize, serde::Deserialize)]
|
||||
pub struct VMFileDisk {
|
||||
/// Disk name
|
||||
pub name: String,
|
||||
/// Disk size, in bytes
|
||||
pub size: FileSize,
|
||||
/// Disk bus
|
||||
pub bus: VMDiskBus,
|
||||
/// Disk format
|
||||
#[serde(flatten)]
|
||||
pub format: VMDiskFormat,
|
||||
/// When creating a new disk, specify the disk image template to use
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub from_image: Option<String>,
|
||||
/// Set this variable to true to resize disk image
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub resize: Option<bool>,
|
||||
/// Set this variable to true to delete the disk
|
||||
pub delete: bool,
|
||||
}
|
||||
|
||||
impl VMFileDisk {
|
||||
pub fn load_from_file(path: &str, bus: &str) -> anyhow::Result<Self> {
|
||||
let file = Path::new(path);
|
||||
|
||||
let info = DiskFileInfo::load_file(file)?;
|
||||
|
||||
Ok(Self {
|
||||
name: info.name,
|
||||
|
||||
// Get only the virtual size of the file
|
||||
size: match info.format {
|
||||
DiskFileFormat::Raw { .. } => info.file_size,
|
||||
DiskFileFormat::QCow2 { virtual_size } => virtual_size,
|
||||
_ => anyhow::bail!("Unsupported image format: {:?}", info.format),
|
||||
},
|
||||
|
||||
format: match info.format {
|
||||
DiskFileFormat::Raw { is_sparse } => VMDiskFormat::Raw { is_sparse },
|
||||
DiskFileFormat::QCow2 { .. } => VMDiskFormat::QCow2,
|
||||
_ => anyhow::bail!("Unsupported image format: {:?}", info.format),
|
||||
},
|
||||
|
||||
bus: match bus {
|
||||
"virtio" => VMDiskBus::Virtio,
|
||||
"sata" => VMDiskBus::SATA,
|
||||
_ => anyhow::bail!("Unsupported disk bus type: {bus}"),
|
||||
},
|
||||
|
||||
delete: false,
|
||||
from_image: None,
|
||||
resize: None,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn check_config(&self) -> anyhow::Result<()> {
|
||||
if constants::DISK_NAME_MIN_LEN > self.name.len()
|
||||
|| constants::DISK_NAME_MAX_LEN < self.name.len()
|
||||
{
|
||||
return Err(VMDisksError::Config("Disk name length is invalid").into());
|
||||
}
|
||||
|
||||
if !regex!("^[a-zA-Z0-9]+$").is_match(&self.name) {
|
||||
return Err(VMDisksError::Config("Disk name contains invalid characters!").into());
|
||||
}
|
||||
|
||||
// Check disk size
|
||||
if !(constants::DISK_SIZE_MIN..=constants::DISK_SIZE_MAX).contains(&self.size) {
|
||||
return Err(VMDisksError::Config("Disk size is invalid!").into());
|
||||
}
|
||||
|
||||
// Check specified disk image template
|
||||
if let Some(disk_image) = &self.from_image {
|
||||
if !files_utils::check_file_name(disk_image) {
|
||||
return Err(VMDisksError::Config("Disk image template name is not valid!").into());
|
||||
}
|
||||
|
||||
if !AppConfig::get().disk_images_file_path(disk_image).is_file() {
|
||||
return Err(
|
||||
VMDisksError::Config("Specified disk image file does not exist!").into(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get disk path on file system
|
||||
pub fn disk_path(&self, id: XMLUuid) -> PathBuf {
|
||||
let domain_dir = AppConfig::get().vm_storage_path(id);
|
||||
let file_name = match self.format {
|
||||
VMDiskFormat::Raw { .. } => self.name.to_string(),
|
||||
VMDiskFormat::QCow2 => format!("{}.qcow2", self.name),
|
||||
};
|
||||
domain_dir.join(&file_name)
|
||||
}
|
||||
|
||||
/// Apply disk configuration
|
||||
pub fn apply_config(&self, id: XMLUuid) -> anyhow::Result<()> {
|
||||
self.check_config()?;
|
||||
|
||||
let file = self.disk_path(id);
|
||||
files_utils::create_directory_if_missing(file.parent().unwrap())?;
|
||||
|
||||
// Delete file if requested
|
||||
if self.delete {
|
||||
if !file.exists() {
|
||||
log::debug!("File {file:?} does not exists, so it was not deleted");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
log::info!("Deleting {file:?}");
|
||||
std::fs::remove_file(file)?;
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if file.exists() {
|
||||
log::debug!("File {file:?} does not exists, so it was not touched");
|
||||
}
|
||||
// Create disk if required
|
||||
else {
|
||||
// Determine file format
|
||||
let format = match self.format {
|
||||
VMDiskFormat::Raw { is_sparse } => DiskFileFormat::Raw { is_sparse },
|
||||
VMDiskFormat::QCow2 => DiskFileFormat::QCow2 {
|
||||
virtual_size: self.size,
|
||||
},
|
||||
};
|
||||
|
||||
// Create / Restore disk file
|
||||
match &self.from_image {
|
||||
// Create disk file
|
||||
None => {
|
||||
DiskFileInfo::create(&file, format, self.size)?;
|
||||
}
|
||||
|
||||
// Restore disk image template
|
||||
Some(disk_img) => {
|
||||
let src_file =
|
||||
DiskFileInfo::load_file(&AppConfig::get().disk_images_file_path(disk_img))?;
|
||||
src_file.convert(&file, format)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Resize disk file if requested
|
||||
if self.resize == Some(true) {
|
||||
let disk = DiskFileInfo::load_file(&file)?;
|
||||
|
||||
// Can only increase disk size
|
||||
if let Err(e) = disk.resize(self.size) {
|
||||
log::error!("Failed to resize disk file {}: {e:?}", self.name);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
@ -1,47 +0,0 @@
# Bridges

Bridges can be used to connect virtual machines to networks.

## Setup Bridge on Ubuntu

1. Install dependencies:

```bash
sudo apt install bridge-utils
```

2. Adapt your netplan configuration to set the following:

```yaml
network:
  version: 2
  renderer: networkd
  ethernets:
    enp2s0:
      dhcp4: no
  bridges:
    br0: # Bridge name
      dhcp4: yes
      interfaces:
        - enp2s0 # Set to your interface
```

3. Apply the netplan configuration:

```bash
sudo netplan apply
```

4. Get the state and the list of bridges on the system:

```bash
sudo brctl show

# Or
ip link show type bridge
```

## Reference
[How to Configure Network Bridge in Ubuntu](https://www.tecmint.com/create-network-bridge-in-ubuntu/)
@ -1,11 +0,0 @@
## References

### LibVirt XML documentation
* Online: https://libvirt.org/format.html

* Offline with Ubuntu:

```bash
sudo apt install libvirt-doc
firefox /usr/share/doc/libvirt-doc/html/index.html
```
@ -5,9 +5,9 @@
|
||||
sudo apt install libvirt-dev
|
||||
```
|
||||
|
||||
2. Libvirt and cloud image utilities must also be installed:
|
||||
2. Libvirt must also be installed:
|
||||
```bash
|
||||
sudo apt install qemu-kvm libvirt-daemon-system cloud-image-utils
|
||||
sudo apt install qemu-kvm libvirt-daemon-system
|
||||
```
|
||||
|
||||
3. Allow the current user to manage VMs:
|
||||
@ -34,7 +34,7 @@ docker compose up
|
||||
sudo mkdir /var/virtweb
|
||||
sudo chown $USER:$USER /var/virtweb
|
||||
cd virtweb_backend
|
||||
cargo fmt && cargo clippy && cargo run -- -s /var/virtweb --hypervisor-uri "qemu:///system" --website-origin "http://localhost:5173"
|
||||
cargo fmt && cargo clippy && cargo run -- -s /var/virtweb --hypervisor-uri "qemu:///system"
|
||||
```
|
||||
|
||||
7. Run the frontend
|
||||
|
@ -9,13 +9,13 @@ make
|
||||
|
||||
The release file will be available in `virtweb_backend/target/release/virtweb_backend`.
|
||||
|
||||
This is the only artifact that must be copied to the server. It is recommended to copy it to the `/usr/local/bin` directory.
|
||||
This is the only artifcat that must be copied to the server. It is recommended to copy it to the `/usr/local/bin` directory.
|
||||
|
||||
## Install requirements
|
||||
In order to work properly, VirtWeb relies on `libvirt`, `qemu`, `kvm` and `cloud-localds`:
|
||||
In order to work properly, VirtWeb relies on `libvirt`, `qemu` and `kvm`:
|
||||
|
||||
```bash
|
||||
sudo apt install qemu-kvm libvirt-daemon-system libvirt0 libvirt-clients libvirt-daemon bridge-utils cloud-image-utils
|
||||
sudo apt install qemu-kvm libvirt-daemon-system libvirt0 libvirt-clients libvirt-daemon bridge-utils
|
||||
```
|
||||
|
||||
## Dedicated user
|
||||
@ -61,7 +61,7 @@ STORAGE=/home/virtweb/storage
|
||||
HYPERVISOR_URI=qemu:///system
|
||||
```
|
||||
|
||||
> Note: `HYPERVISOR_URI=qemu:///system` is used to specify that we want to use the main hypervisor.
|
||||
> Note: `HYPERVISOR_URI=qemu:///system` is used to sepcify that we want to use the main hypervisor.
|
||||
|
||||
## Register Virtweb service
|
||||
Before registering service, check that the configuration works correctly:
|
||||
|
@ -1,12 +1,46 @@
|
||||
# Virtweb frontend
|
||||
Built with Vite + React + TypeScript
|
||||
# Getting Started with Create React App
|
||||
|
||||
## Get dependencies
|
||||
```bash
|
||||
npm install
|
||||
```
|
||||
This project was bootstrapped with [Create React App](https://github.com/facebook/create-react-app).
|
||||
|
||||
# Run for development
|
||||
```bash
|
||||
npm run dev
|
||||
```
|
||||
## Available Scripts
|
||||
|
||||
In the project directory, you can run:
|
||||
|
||||
### `npm start`
|
||||
|
||||
Runs the app in the development mode.\
|
||||
Open [http://localhost:3000](http://localhost:3000) to view it in the browser.
|
||||
|
||||
The page will reload if you make edits.\
|
||||
You will also see any lint errors in the console.
|
||||
|
||||
### `npm test`
|
||||
|
||||
Launches the test runner in the interactive watch mode.\
|
||||
See the section about [running tests](https://facebook.github.io/create-react-app/docs/running-tests) for more information.
|
||||
|
||||
### `npm run build`
|
||||
|
||||
Builds the app for production to the `build` folder.\
|
||||
It correctly bundles React in production mode and optimizes the build for the best performance.
|
||||
|
||||
The build is minified and the filenames include the hashes.\
|
||||
Your app is ready to be deployed!
|
||||
|
||||
See the section about [deployment](https://facebook.github.io/create-react-app/docs/deployment) for more information.
|
||||
|
||||
### `npm run eject`
|
||||
|
||||
**Note: this is a one-way operation. Once you `eject`, you can’t go back!**
|
||||
|
||||
If you aren’t satisfied with the build tool and configuration choices, you can `eject` at any time. This command will remove the single build dependency from your project.
|
||||
|
||||
Instead, it will copy all the configuration files and the transitive dependencies (webpack, Babel, ESLint, etc) right into your project so you have full control over them. All of the commands except `eject` will still work, but they will point to the copied scripts so you can tweak them. At this point you’re on your own.
|
||||
|
||||
You don’t have to ever use `eject`. The curated feature set is suitable for small and middle deployments, and you shouldn’t feel obligated to use this feature. However we understand that this tool wouldn’t be useful if you couldn’t customize it when you are ready for it.
|
||||
|
||||
## Learn More
|
||||
|
||||
You can learn more in the [Create React App documentation](https://facebook.github.io/create-react-app/docs/getting-started).
|
||||
|
||||
To learn React, check out the [React documentation](https://reactjs.org/).
|
||||
|
@ -1,54 +0,0 @@
|
||||
import js from "@eslint/js";
|
||||
import reactDom from "eslint-plugin-react-dom";
|
||||
import reactHooks from "eslint-plugin-react-hooks";
|
||||
import reactRefresh from "eslint-plugin-react-refresh";
|
||||
import reactX from "eslint-plugin-react-x";
|
||||
import globals from "globals";
|
||||
import tseslint from "typescript-eslint";
|
||||
|
||||
export default tseslint.config(
|
||||
{ ignores: ["dist"] },
|
||||
{
|
||||
extends: [
|
||||
js.configs.recommended,
|
||||
...tseslint.configs.strictTypeChecked,
|
||||
...tseslint.configs.stylisticTypeChecked,
|
||||
],
|
||||
files: ["**/*.{ts,tsx}"],
|
||||
languageOptions: {
|
||||
ecmaVersion: 2020,
|
||||
globals: globals.browser,
|
||||
parserOptions: {
|
||||
project: ["./tsconfig.node.json", "./tsconfig.app.json"],
|
||||
tsconfigRootDir: import.meta.dirname,
|
||||
},
|
||||
},
|
||||
plugins: {
|
||||
"react-hooks": reactHooks,
|
||||
"react-refresh": reactRefresh,
|
||||
"react-x": reactX,
|
||||
"react-dom": reactDom,
|
||||
},
|
||||
rules: {
|
||||
...reactHooks.configs.recommended.rules,
|
||||
"react-refresh/only-export-components": [
|
||||
"warn",
|
||||
{ allowConstantExport: true },
|
||||
],
|
||||
...reactX.configs["recommended-typescript"].rules,
|
||||
...reactDom.configs.recommended.rules,
|
||||
"@typescript-eslint/no-non-null-assertion": "off",
|
||||
"@typescript-eslint/no-misused-promises": "off",
|
||||
"@typescript-eslint/no-floating-promises": "off",
|
||||
"@typescript-eslint/restrict-template-expressions": "off",
|
||||
"@typescript-eslint/no-extraneous-class": "off",
|
||||
"@typescript-eslint/no-explicit-any": "off",
|
||||
"@typescript-eslint/no-unsafe-assignment": "off",
|
||||
"@typescript-eslint/no-unsafe-return": "off",
|
||||
"@typescript-eslint/no-unsafe-call": "off",
|
||||
"@typescript-eslint/no-unsafe-member-access": "off",
|
||||
"@typescript-eslint/no-unsafe-argument": "off",
|
||||
"react-refresh/only-export-components": "off",
|
||||
},
|
||||
}
|
||||
);
|
22887 virtweb_frontend/package-lock.json (generated): file diff suppressed because it is too large
@ -3,54 +3,63 @@
|
||||
"version": "0.1.0",
|
||||
"type": "module",
|
||||
"private": true,
|
||||
"dependencies": {
|
||||
"@emotion/react": "^11.11.1",
|
||||
"@emotion/styled": "^11.11.0",
|
||||
"@fontsource/roboto": "^5.0.13",
|
||||
"@mdi/js": "^7.2.96",
|
||||
"@mdi/react": "^1.6.1",
|
||||
"@mui/icons-material": "^5.14.7",
|
||||
"@mui/material": "^5.14.7",
|
||||
"@mui/x-charts": "^7.3.0",
|
||||
"@mui/x-data-grid": "^7.3.0",
|
||||
"@testing-library/jest-dom": "^6.4.2",
|
||||
"@testing-library/react": "^16.0.0",
|
||||
"@testing-library/user-event": "^14.5.2",
|
||||
"@types/humanize-duration": "^3.27.1",
|
||||
"@types/jest": "^29.5.12",
|
||||
"@types/react": "^18.2.79",
|
||||
"@types/react-dom": "^18.2.25",
|
||||
"@types/react-syntax-highlighter": "^15.5.11",
|
||||
"@types/uuid": "^10.0.0",
|
||||
"@vitejs/plugin-react": "^4.2.1",
|
||||
"date-and-time": "^3.1.1",
|
||||
"filesize": "^10.0.12",
|
||||
"humanize-duration": "^3.29.0",
|
||||
"mui-file-input": "^4.0.4",
|
||||
"react": "^18.2.0",
|
||||
"react-dom": "^18.2.0",
|
||||
"react-router-dom": "^6.23.0",
|
||||
"react-syntax-highlighter": "^15.5.0",
|
||||
"react-vnc": "^1.0.0",
|
||||
"typescript": "^4.0.0",
|
||||
"uuid": "^10.0.0",
|
||||
"vite": "^5.2.10",
|
||||
"vite-tsconfig-paths": "^4.2.2",
|
||||
"web-vitals": "^3.5.2",
|
||||
"xml-formatter": "^3.6.0"
|
||||
},
|
||||
"scripts": {
|
||||
"dev": "vite",
|
||||
"build": "tsc -b && vite build",
|
||||
"lint": "eslint .",
|
||||
"start": "vite",
|
||||
"build": "tsc && vite build",
|
||||
"preview": "vite preview"
|
||||
},
|
||||
"dependencies": {
|
||||
"@emotion/react": "^11.14.0",
|
||||
"@emotion/styled": "^11.14.0",
|
||||
"@fontsource/roboto": "^5.2.6",
|
||||
"@mdi/js": "^7.4.47",
|
||||
"@mdi/react": "^1.6.1",
|
||||
"@monaco-editor/react": "^4.7.0",
|
||||
"@mui/icons-material": "^7.1.1",
|
||||
"@mui/material": "^7.1.1",
|
||||
"@mui/x-charts": "^8.3.1",
|
||||
"@mui/x-data-grid": "^8.3.1",
|
||||
"date-and-time": "^3.6.0",
|
||||
"filesize": "^10.1.6",
|
||||
"humanize-duration": "^3.32.2",
|
||||
"monaco-editor": "^0.52.2",
|
||||
"monaco-yaml": "^5.4.0",
|
||||
"react": "^19.1.0",
|
||||
"react-dom": "^19.1.0",
|
||||
"react-router-dom": "^7.6.2",
|
||||
"react-syntax-highlighter": "^15.6.1",
|
||||
"react-vnc": "^3.1.0",
|
||||
"uuid": "^11.1.0",
|
||||
"xml-formatter": "^3.6.6",
|
||||
"yaml": "^2.8.0"
|
||||
"eslintConfig": {
|
||||
"extends": [
|
||||
"react-app",
|
||||
"react-app/jest"
|
||||
]
|
||||
},
|
||||
"devDependencies": {
|
||||
"@eslint/js": "^9.27.0",
|
||||
"@types/humanize-duration": "^3.27.4",
|
||||
"@types/jest": "^29.5.14",
|
||||
"@types/react": "^19.1.8",
|
||||
"@types/react-dom": "^19.1.6",
|
||||
"@types/react-syntax-highlighter": "^15.5.13",
|
||||
"@types/uuid": "^10.0.0",
|
||||
"@vitejs/plugin-react": "^4.4.1",
|
||||
"eslint": "^9.27.0",
|
||||
"eslint-plugin-react-dom": "^1.49.0",
|
||||
"eslint-plugin-react-hooks": "^5.2.0",
|
||||
"eslint-plugin-react-refresh": "^0.4.20",
|
||||
"eslint-plugin-react-x": "^1.49.0",
|
||||
"globals": "^16.1.0",
|
||||
"typescript": "^5.8.3",
|
||||
"typescript-eslint": "^8.32.1",
|
||||
"vite": "^6.3.5"
|
||||
"browserslist": {
|
||||
"production": [
|
||||
">0.2%",
|
||||
"not dead",
|
||||
"not op_mini all"
|
||||
],
|
||||
"development": [
|
||||
"last 1 chrome version",
|
||||
"last 1 firefox version",
|
||||
"last 1 safari version"
|
||||
]
|
||||
}
|
||||
}
|
||||
|
@ -38,7 +38,6 @@ import { LoginRoute } from "./routes/auth/LoginRoute";
|
||||
import { OIDCCbRoute } from "./routes/auth/OIDCCbRoute";
|
||||
import { BaseAuthenticatedPage } from "./widgets/BaseAuthenticatedPage";
|
||||
import { BaseLoginPage } from "./widgets/BaseLoginPage";
|
||||
import { DiskImagesRoute } from "./routes/DiskImagesRoute";
|
||||
|
||||
interface AuthContext {
|
||||
signedIn: boolean;
|
||||
@ -52,10 +51,7 @@ export function App() {
|
||||
|
||||
const context: AuthContext = {
|
||||
signedIn: signedIn,
|
||||
setSignedIn: (s) => {
|
||||
setSignedIn(s);
|
||||
location.reload();
|
||||
},
|
||||
setSignedIn: (s) => setSignedIn(s),
|
||||
};
|
||||
|
||||
const router = createBrowserRouter(
|
||||
@ -64,8 +60,6 @@ export function App() {
|
||||
<Route path="*" element={<BaseAuthenticatedPage />}>
|
||||
<Route path="" element={<HomeRoute />} />
|
||||
|
||||
<Route path="disk_images" element={<DiskImagesRoute />} />
|
||||
|
||||
<Route path="iso" element={<IsoFilesRoute />} />
|
||||
|
||||
<Route path="vms" element={<VMListRoute />} />
|
||||
@ -103,12 +97,12 @@ export function App() {
|
||||
);
|
||||
|
||||
return (
|
||||
<AuthContextK value={context}>
|
||||
<AuthContextK.Provider value={context}>
|
||||
<RouterProvider router={router} />
|
||||
</AuthContextK>
|
||||
</AuthContextK.Provider>
|
||||
);
|
||||
}
|
||||
|
||||
export function useAuth(): AuthContext {
|
||||
return React.use(AuthContextK)!;
|
||||
return React.useContext(AuthContextK)!;
|
||||
}
|
||||
|
@ -26,7 +26,7 @@ export class APIClient {
|
||||
* Get backend URL
|
||||
*/
|
||||
static backendURL(): string {
|
||||
const URL = String(import.meta.env.VITE_APP_BACKEND ?? "");
|
||||
const URL = import.meta.env.VITE_APP_BACKEND ?? "";
|
||||
if (URL.length === 0) throw new Error("Backend URL undefined!");
|
||||
return URL;
|
||||
}
|
||||
@ -44,7 +44,7 @@ export class APIClient {
|
||||
*/
|
||||
static async exec(args: RequestParams): Promise<APIResponse> {
|
||||
let body: string | undefined | FormData = undefined;
|
||||
const headers: any = {};
|
||||
let headers: any = {};
|
||||
|
||||
// JSON request
|
||||
if (args.jsonData) {
|
||||
@ -66,25 +66,22 @@ export class APIClient {
|
||||
if (args.upProgress) {
|
||||
const res: XMLHttpRequest = await new Promise((resolve, reject) => {
|
||||
const xhr = new XMLHttpRequest();
|
||||
xhr.upload.addEventListener("progress", (e) => {
|
||||
args.upProgress!(e.loaded / e.total);
|
||||
});
|
||||
xhr.addEventListener("load", () => {
|
||||
resolve(xhr);
|
||||
});
|
||||
xhr.addEventListener("error", () => {
|
||||
reject(new Error("File upload failed"));
|
||||
});
|
||||
xhr.addEventListener("abort", () => {
|
||||
reject(new Error("File upload aborted"));
|
||||
});
|
||||
xhr.addEventListener("timeout", () => {
|
||||
reject(new Error("File upload timeout"));
|
||||
});
|
||||
xhr.upload.addEventListener("progress", (e) =>
|
||||
args.upProgress!(e.loaded / e.total)
|
||||
);
|
||||
xhr.addEventListener("load", () => resolve(xhr));
|
||||
xhr.addEventListener("error", () =>
|
||||
reject(new Error("File upload failed"))
|
||||
);
|
||||
xhr.addEventListener("abort", () =>
|
||||
reject(new Error("File upload aborted"))
|
||||
);
|
||||
xhr.addEventListener("timeout", () =>
|
||||
reject(new Error("File upload timeout"))
|
||||
);
|
||||
xhr.open(args.method, url, true);
|
||||
xhr.withCredentials = true;
|
||||
for (const key in headers) {
|
||||
// eslint-disable-next-line no-prototype-builtins
|
||||
if (headers.hasOwnProperty(key))
|
||||
xhr.setRequestHeader(key, headers[key]);
|
||||
}
|
||||
@ -103,7 +100,6 @@ export class APIClient {
|
||||
body: body,
|
||||
headers: headers,
|
||||
credentials: "include",
|
||||
signal: AbortSignal.timeout(50 * 1000 * 1000),
|
||||
});
|
||||
|
||||
// Process response
|
||||
|
@ -1,119 +0,0 @@
|
||||
import { APIClient } from "./ApiClient";
|
||||
import { VMFileDisk, VMInfo } from "./VMApi";
|
||||
|
||||
export type DiskImageFormat =
|
||||
| { format: "Raw"; is_sparse: boolean }
|
||||
| { format: "QCow2"; virtual_size?: number }
|
||||
| { format: "GzCompressedQCow2" }
|
||||
| { format: "GzCompressedRaw" }
|
||||
| { format: "XzCompressedQCow2" }
|
||||
| { format: "XzCompressedRaw" };
|
||||
|
||||
export type DiskImage = {
|
||||
file_size: number;
|
||||
file_name: string;
|
||||
name: string;
|
||||
created: number;
|
||||
} & DiskImageFormat;
|
||||
|
||||
export class DiskImageApi {
|
||||
/**
|
||||
* Upload a new disk image file to the server
|
||||
*/
|
||||
static async Upload(
|
||||
file: File,
|
||||
progress: (progress: number) => void
|
||||
): Promise<void> {
|
||||
const fd = new FormData();
|
||||
fd.append("file", file);
|
||||
|
||||
await APIClient.exec({
|
||||
method: "POST",
|
||||
uri: "/disk_images/upload",
|
||||
formData: fd,
|
||||
upProgress: progress,
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the list of disk images
|
||||
*/
|
||||
static async GetList(): Promise<DiskImage[]> {
|
||||
return (
|
||||
await APIClient.exec({
|
||||
method: "GET",
|
||||
uri: "/disk_images/list",
|
||||
})
|
||||
).data;
|
||||
}
|
||||
|
||||
/**
|
||||
* Download disk image file
|
||||
*/
|
||||
static async Download(
|
||||
file: DiskImage,
|
||||
progress: (p: number) => void
|
||||
): Promise<Blob> {
|
||||
return (
|
||||
await APIClient.exec({
|
||||
method: "GET",
|
||||
uri: `/disk_images/${file.file_name}`,
|
||||
downProgress(e) {
|
||||
progress(Math.floor(100 * (e.progress / e.total)));
|
||||
},
|
||||
})
|
||||
).data;
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert disk image file
|
||||
*/
|
||||
static async Convert(
|
||||
file: DiskImage,
|
||||
dest_file_name: string,
|
||||
dest_format: DiskImageFormat
|
||||
): Promise<void> {
|
||||
await APIClient.exec({
|
||||
method: "POST",
|
||||
uri: `/disk_images/${file.file_name}/convert`,
|
||||
jsonData: { ...dest_format, dest_file_name },
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Backup VM disk into image disks library
|
||||
*/
|
||||
static async BackupVMDisk(
|
||||
vm: VMInfo,
|
||||
disk: VMFileDisk,
|
||||
dest_file_name: string,
|
||||
format: DiskImageFormat
|
||||
): Promise<void> {
|
||||
await APIClient.exec({
|
||||
uri: `/vm/${vm.uuid}/disk/${disk.name}/backup`,
|
||||
method: "POST",
|
||||
jsonData: { ...format, dest_file_name },
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Rename disk image file
|
||||
*/
|
||||
static async Rename(file: DiskImage, name: string): Promise<void> {
|
||||
await APIClient.exec({
|
||||
method: "POST",
|
||||
uri: `/disk_images/${file.file_name}/rename`,
|
||||
jsonData: { name },
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Delete disk image file
|
||||
*/
|
||||
static async Delete(file: DiskImage): Promise<void> {
|
||||
await APIClient.exec({
|
||||
method: "DELETE",
|
||||
uri: `/disk_images/${file.file_name}`,
|
||||
});
|
||||
}
|
||||
}
|
@ -1,15 +0,0 @@
|
||||
import { APIClient } from "./ApiClient";
|
||||
|
||||
export class GroupApi {
|
||||
/**
|
||||
* Get the entire list of networks
|
||||
*/
|
||||
static async GetList(): Promise<string[]> {
|
||||
return (
|
||||
await APIClient.exec({
|
||||
method: "GET",
|
||||
uri: "/group/list",
|
||||
})
|
||||
).data;
|
||||
}
|
||||
}
|
@ -5,15 +5,6 @@ export interface IsoFile {
|
||||
size: number;
|
||||
}
|
||||
|
||||
/**
|
||||
* ISO catalog entries
|
||||
*/
|
||||
export interface ISOCatalogEntry {
|
||||
name: string;
|
||||
url: string;
|
||||
image: string;
|
||||
}
|
||||
|
||||
export class IsoFilesApi {
|
||||
/**
|
||||
* Upload a new ISO file to the server
|
||||
@ -83,23 +74,4 @@ export class IsoFilesApi {
|
||||
uri: `/iso/${file.filename}`,
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Get iso catalog
|
||||
*/
|
||||
static async Catalog(): Promise<ISOCatalogEntry[]> {
|
||||
return (
|
||||
await APIClient.exec({
|
||||
method: "GET",
|
||||
uri: "/assets/iso_catalog.json",
|
||||
})
|
||||
).data;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get catalog image URL
|
||||
*/
|
||||
static CatalogImageURL(entry: ISOCatalogEntry): string {
|
||||
return APIClient.backendURL() + entry.image;
|
||||
}
|
||||
}
|
||||
|
@ -140,7 +140,7 @@ export interface NWFilter {
|
||||
rules: NWFilterRule[];
|
||||
}
|
||||
|
||||
export function NWFilterURL(n: NWFilter, edit = false): string {
|
||||
export function NWFilterURL(n: NWFilter, edit: boolean = false): string {
|
||||
return `/nwfilter/${n.uuid}${edit ? "/edit" : ""}`;
|
||||
}
|
||||
|
||||
@ -221,7 +221,7 @@ export class NWFilterApi {
|
||||
static async Delete(n: NWFilter): Promise<void> {
|
||||
await APIClient.exec({
|
||||
method: "DELETE",
|
||||
uri: `/nwfilter/${n.uuid!}`,
|
||||
uri: `/nwfilter/${n.uuid}`,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
@ -53,7 +53,7 @@ export interface NetworkInfo {

export type NetworkStatus = "Started" | "Stopped";

export function NetworkURL(n: NetworkInfo, edit = false): string {
export function NetworkURL(n: NetworkInfo, edit: boolean = false): string {
  return `/net/${n.uuid}${edit ? "/edit" : ""}`;
}
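The only change to NetworkURL (and, in the same way, to NWFilterURL above and APITokenURL below) is dropping the redundant `: boolean` annotation: with a default value of `false`, TypeScript already infers the parameter type. A small sketch of the equivalent behaviour, using a stripped-down stand-in for NetworkInfo:

```typescript
// Minimal stand-in type; the real NetworkInfo carries many more fields.
interface NetworkInfoLike {
  uuid?: string;
}

// `edit = false` and `edit: boolean = false` compile to the same signature.
function NetworkURL(n: NetworkInfoLike, edit = false): string {
  return `/net/${n.uuid}${edit ? "/edit" : ""}`;
}

const net: NetworkInfoLike = { uuid: "b1c2" };
console.log(NetworkURL(net));       // "/net/b1c2"
console.log(NetworkURL(net, true)); // "/net/b1c2/edit"
```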
@ -5,7 +5,6 @@ export interface ServerConfig {
  local_auth_enabled: boolean;
  oidc_auth_enabled: boolean;
  iso_mimetypes: string[];
  disk_images_mimetypes: string[];
  net_mac_prefix: string;
  builtin_nwfilter_rules: string[];
  nwfilter_chains: string[];
@ -14,15 +13,12 @@ export interface ServerConfig {

export interface ServerConstraints {
  iso_max_size: number;
  disk_image_max_size: number;
  vnc_token_duration: number;
  vm_name_size: LenConstraint;
  vm_title_size: LenConstraint;
  group_id_size: LenConstraint;
  memory_size: LenConstraint;
  disk_name_size: LenConstraint;
  disk_size: LenConstraint;
  disk_image_name_size: LenConstraint;
  net_name_size: LenConstraint;
  net_title_size: LenConstraint;
  net_nat_comment_size: LenConstraint;
@ -77,7 +73,7 @@ interface SystemInfo {
    secs: number;
    nanos: number;
  };
  global_cpu_usage: number;
  global_cpu_info: GlobalCPUInfo;
  cpus: CpuCore[];
  physical_core_count: number;
  total_memory: number;
@ -98,6 +94,14 @@ interface SystemInfo {
  host_name: string;
}

interface GlobalCPUInfo {
  cpu_usage: number;
  name: string;
  vendor_id: string;
  brand: string;
  frequency: number;
}

interface CpuCore {
  cpu_usage: number;
  name: string;
@ -220,28 +224,4 @@ export class ServerApi {
      })
    ).data;
  }

  /**
   * Get host networks bridges list
   */
  static async GetNetworksBridgesList(): Promise<string[]> {
    return (
      await APIClient.exec({
        method: "GET",
        uri: "/server/bridges",
      })
    ).data;
  }

  /**
   * Export all server configs
   */
  static async ExportServerConfigs(): Promise<Blob> {
    return (
      await APIClient.exec({
        method: "GET",
        uri: "/server/export_configs",
      })
    ).data;
  }
}
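ExportServerConfigs resolves to a Blob, and the SysInfo route later in this diff turns it into an object URL for the browser to download. A condensed sketch of that flow (the revocation timeout is an added assumption):

```typescript
// Sketch of a browser-side caller; ServerApi is the class shown above,
// the import path is an assumption.
import { ServerApi } from "./ServerApi";

async function downloadServerConfigs(): Promise<void> {
  // GET /server/export_configs resolves to a Blob
  const blob = await ServerApi.ExportServerConfigs();

  // Hand the blob to the browser as a temporary object URL
  const url = URL.createObjectURL(blob);
  window.location.href = url;

  // Free the object URL later; the delay is an illustrative choice
  setTimeout(() => URL.revokeObjectURL(url), 60_000);
}
```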
@ -20,7 +20,7 @@ export interface APIToken {
  max_inactivity?: number;
}

export function APITokenURL(t: APIToken, edit = false): string {
export function APITokenURL(t: APIToken, edit: boolean = false): string {
  return `/token/${t.id}${edit ? "/edit" : ""}`;
}
@ -17,38 +17,19 @@ export type VMState =
|
||||
| "PowerManagementSuspended"
|
||||
| "Other";
|
||||
|
||||
export type VMFileDisk = BaseFileVMDisk & (RawVMDisk | QCow2Disk);
|
||||
export type DiskAllocType = "Sparse" | "Fixed";
|
||||
|
||||
export type DiskBusType = "Virtio" | "SATA";
|
||||
|
||||
export interface BaseFileVMDisk {
|
||||
export interface VMDisk {
|
||||
size: number;
|
||||
name: string;
|
||||
bus: DiskBusType;
|
||||
|
||||
alloc_type: DiskAllocType;
|
||||
delete: boolean;
|
||||
|
||||
// For new disk only
|
||||
from_image?: string;
|
||||
|
||||
// Resize disk image after clone
|
||||
resize?: boolean;
|
||||
|
||||
// application attributes
|
||||
// application attribute
|
||||
new?: boolean;
|
||||
originalSize?: number;
|
||||
deleteType?: "keepfile" | "deletefile";
|
||||
}
|
||||
|
||||
interface RawVMDisk {
|
||||
format: "Raw";
|
||||
is_sparse: boolean;
|
||||
}
|
||||
|
||||
interface QCow2Disk {
|
||||
format: "QCow2";
|
||||
}
|
||||
|
||||
export interface VMNetInterfaceFilterParams {
|
||||
name: string;
|
||||
value: string;
|
||||
@ -59,16 +40,11 @@ export interface VMNetInterfaceFilter {
|
||||
parameters: VMNetInterfaceFilterParams[];
|
||||
}
|
||||
|
||||
export type VMNetInterface = (
|
||||
| VMNetUserspaceSLIRPStack
|
||||
| VMNetDefinedNetwork
|
||||
| VMNetBridge
|
||||
) &
|
||||
export type VMNetInterface = (VMNetUserspaceSLIRPStack | VMNetDefinedNetwork) &
|
||||
VMNetInterfaceBase;
|
||||
|
||||
export interface VMNetInterfaceBase {
|
||||
mac: string;
|
||||
model: "Virtio" | "E1000";
|
||||
nwfilterref?: VMNetInterfaceFilter;
|
||||
}
|
||||
|
||||
@ -81,40 +57,21 @@ export interface VMNetDefinedNetwork {
|
||||
network: string;
|
||||
}
|
||||
|
||||
export interface VMNetBridge {
|
||||
type: "Bridge";
|
||||
bridge: string;
|
||||
}
|
||||
|
||||
export interface VMCloudInit {
|
||||
attach_config: boolean;
|
||||
user_data: string;
|
||||
instance_id?: string;
|
||||
local_hostname?: string;
|
||||
dsmode?: "Net" | "Local";
|
||||
network_configuration?: string;
|
||||
}
|
||||
|
||||
export type VMBootType = "UEFI" | "UEFISecureBoot" | "Legacy";
|
||||
|
||||
interface VMInfoInterface {
|
||||
name: string;
|
||||
uuid?: string;
|
||||
genid?: string;
|
||||
title?: string;
|
||||
description?: string;
|
||||
group?: string;
|
||||
boot_type: VMBootType;
|
||||
boot_type: "UEFI" | "UEFISecureBoot";
|
||||
architecture: "i686" | "x86_64";
|
||||
memory: number;
|
||||
number_vcpu: number;
|
||||
vnc_access: boolean;
|
||||
iso_files: string[];
|
||||
file_disks: VMFileDisk[];
|
||||
disks: VMDisk[];
|
||||
networks: VMNetInterface[];
|
||||
tpm_module: boolean;
|
||||
oem_strings: string[];
|
||||
cloud_init: VMCloudInit;
|
||||
}
|
||||
|
||||
export class VMInfo implements VMInfoInterface {
|
||||
@ -123,18 +80,15 @@ export class VMInfo implements VMInfoInterface {
|
||||
genid?: string;
|
||||
title?: string;
|
||||
description?: string;
|
||||
group?: string;
|
||||
boot_type: VMBootType;
|
||||
boot_type: "UEFI" | "UEFISecureBoot";
|
||||
architecture: "i686" | "x86_64";
|
||||
number_vcpu: number;
|
||||
memory: number;
|
||||
vnc_access: boolean;
|
||||
iso_files: string[];
|
||||
file_disks: VMFileDisk[];
|
||||
disks: VMDisk[];
|
||||
networks: VMNetInterface[];
|
||||
tpm_module: boolean;
|
||||
oem_strings: string[];
|
||||
cloud_init: VMCloudInit;
|
||||
|
||||
constructor(int: VMInfoInterface) {
|
||||
this.name = int.name;
|
||||
@ -142,18 +96,15 @@ export class VMInfo implements VMInfoInterface {
|
||||
this.genid = int.genid;
|
||||
this.title = int.title;
|
||||
this.description = int.description;
|
||||
this.group = int.group;
|
||||
this.boot_type = int.boot_type;
|
||||
this.architecture = int.architecture;
|
||||
this.number_vcpu = int.number_vcpu;
|
||||
this.memory = int.memory;
|
||||
this.vnc_access = int.vnc_access;
|
||||
this.iso_files = int.iso_files;
|
||||
this.file_disks = int.file_disks;
|
||||
this.disks = int.disks;
|
||||
this.networks = int.networks;
|
||||
this.tpm_module = int.tpm_module;
|
||||
this.oem_strings = int.oem_strings;
|
||||
this.cloud_init = int.cloud_init;
|
||||
}
|
||||
|
||||
static NewEmpty(): VMInfo {
|
||||
@ -161,15 +112,13 @@ export class VMInfo implements VMInfoInterface {
|
||||
name: "",
|
||||
boot_type: "UEFI",
|
||||
architecture: "x86_64",
|
||||
memory: 1000 * 1000 * 1000,
|
||||
memory: 1024,
|
||||
number_vcpu: 1,
|
||||
vnc_access: true,
|
||||
iso_files: [],
|
||||
file_disks: [],
|
||||
disks: [],
|
||||
networks: [],
|
||||
tpm_module: true,
|
||||
oem_strings: [],
|
||||
cloud_init: { attach_config: false, user_data: "" },
|
||||
});
|
||||
}
|
||||
|
||||
@ -242,8 +191,8 @@ export class VMApi {
|
||||
*/
|
||||
static async UpdateSingle(vm: VMInfo): Promise<VMInfo> {
|
||||
// Process disks list, looking for removal
|
||||
vm.file_disks = vm.file_disks.filter((d) => d.deleteType !== "keepfile");
|
||||
vm.file_disks.forEach((d) => {
|
||||
vm.disks = vm.disks.filter((d) => d.deleteType !== "keepfile");
|
||||
vm.disks.forEach((d) => {
|
||||
if (d.deleteType === "deletefile") d.delete = true;
|
||||
});
|
||||
|
||||
|
@ -1,148 +0,0 @@
|
||||
import {
|
||||
Button,
|
||||
Dialog,
|
||||
DialogActions,
|
||||
DialogContent,
|
||||
DialogContentText,
|
||||
DialogTitle,
|
||||
} from "@mui/material";
|
||||
import React from "react";
|
||||
import { DiskImage, DiskImageApi, DiskImageFormat } from "../api/DiskImageApi";
|
||||
import { ServerApi } from "../api/ServerApi";
|
||||
import { VMFileDisk, VMInfo } from "../api/VMApi";
|
||||
import { useAlert } from "../hooks/providers/AlertDialogProvider";
|
||||
import { useLoadingMessage } from "../hooks/providers/LoadingMessageProvider";
|
||||
import { FileDiskImageWidget } from "../widgets/FileDiskImageWidget";
|
||||
import { CheckboxInput } from "../widgets/forms/CheckboxInput";
|
||||
import { SelectInput } from "../widgets/forms/SelectInput";
|
||||
import { TextInput } from "../widgets/forms/TextInput";
|
||||
import { VMDiskFileWidget } from "../widgets/vms/VMDiskFileWidget";
|
||||
|
||||
export function ConvertDiskImageDialog(
|
||||
p: {
|
||||
onCancel: () => void;
|
||||
onFinished: () => void;
|
||||
} & (
|
||||
| { backup?: false; image: DiskImage }
|
||||
| { backup: true; disk: VMFileDisk; vm: VMInfo }
|
||||
)
|
||||
): React.ReactElement {
|
||||
const alert = useAlert();
|
||||
const loadingMessage = useLoadingMessage();
|
||||
|
||||
const [format, setFormat] = React.useState<DiskImageFormat>({
|
||||
format: "QCow2",
|
||||
});
|
||||
|
||||
const origFilename = p.backup ? p.disk.name : p.image.file_name;
|
||||
|
||||
const [filename, setFilename] = React.useState(origFilename + ".qcow2");
|
||||
|
||||
const handleFormatChange = (value?: string) => {
|
||||
setFormat({ format: value ?? ("QCow2" as any) });
|
||||
|
||||
if (value === "QCow2") setFilename(`${origFilename}.qcow2`);
|
||||
if (value === "GzCompressedQCow2") setFilename(`${origFilename}.qcow2.gz`);
|
||||
if (value === "XzCompressedQCow2") setFilename(`${origFilename}.qcow2.xz`);
|
||||
if (value === "Raw") {
|
||||
setFilename(`${origFilename}.raw`);
|
||||
// Check sparse checkbox by default
|
||||
setFormat({ format: "Raw", is_sparse: true });
|
||||
}
|
||||
if (value === "GzCompressedRaw") setFilename(`${origFilename}.raw.gz`);
|
||||
if (value === "XzCompressedRaw") setFilename(`${origFilename}.raw.xz`);
|
||||
};
|
||||
|
||||
const handleSubmit = async () => {
|
||||
try {
|
||||
loadingMessage.show(
|
||||
p.backup ? "Performing backup..." : "Converting image..."
|
||||
);
|
||||
|
||||
// Perform the conversion / backup operation
|
||||
if (p.backup)
|
||||
await DiskImageApi.BackupVMDisk(p.vm, p.disk, filename, format);
|
||||
else await DiskImageApi.Convert(p.image, filename, format);
|
||||
|
||||
p.onFinished();
|
||||
|
||||
alert(p.backup ? "Backup successful!" : "Conversion successful!");
|
||||
} catch (e) {
|
||||
console.error("Failed to perform backup/conversion!", e);
|
||||
alert(
|
||||
p.backup
|
||||
? `Failed to perform backup! ${e}`
|
||||
: `Failed to convert image! ${e}`
|
||||
);
|
||||
} finally {
|
||||
loadingMessage.hide();
|
||||
}
|
||||
};
|
||||
|
||||
return (
|
||||
<Dialog open onClose={p.onCancel}>
|
||||
<DialogTitle>
|
||||
{p.backup ? `Backup disk ${p.disk.name}` : "Convert disk image"}
|
||||
</DialogTitle>
|
||||
|
||||
<DialogContent>
|
||||
<DialogContentText>
|
||||
Select the destination format for this image:
|
||||
</DialogContentText>
|
||||
|
||||
{/* Show details of of the image */}
|
||||
{p.backup ? (
|
||||
<VMDiskFileWidget {...p} />
|
||||
) : (
|
||||
<FileDiskImageWidget {...p} />
|
||||
)}
|
||||
|
||||
{/* New image format */}
|
||||
<SelectInput
|
||||
editable
|
||||
label="Target format"
|
||||
value={format.format}
|
||||
onValueChange={handleFormatChange}
|
||||
options={[
|
||||
{ value: "QCow2" },
|
||||
{ value: "Raw" },
|
||||
{ value: "GzCompressedRaw" },
|
||||
{ value: "XzCompressedRaw" },
|
||||
{ value: "GzCompressedQCow2" },
|
||||
{ value: "XzCompressedQCow2" },
|
||||
]}
|
||||
/>
|
||||
|
||||
{/* Check for sparse file */}
|
||||
{format.format === "Raw" && (
|
||||
<CheckboxInput
|
||||
editable
|
||||
label="Sparse file"
|
||||
checked={format.is_sparse}
|
||||
onValueChange={(c) => {
|
||||
setFormat({ format: "Raw", is_sparse: c });
|
||||
}}
|
||||
/>
|
||||
)}
|
||||
|
||||
{/* New image name */}
|
||||
<TextInput
|
||||
editable
|
||||
label="New image name"
|
||||
value={filename}
|
||||
onValueChange={(s) => {
|
||||
setFilename(s ?? "");
|
||||
}}
|
||||
size={ServerApi.Config.constraints.disk_image_name_size}
|
||||
helperText="The image name shall contain the proper file extension for the selected target format"
|
||||
/>
|
||||
</DialogContent>
|
||||
<DialogActions>
|
||||
<Button onClick={p.onCancel}>Cancel</Button>
|
||||
<Button onClick={handleSubmit} autoFocus>
|
||||
{p.backup ? "Perform backup" : "Convert image"}
|
||||
</Button>
|
||||
</DialogActions>
|
||||
</Dialog>
|
||||
);
|
||||
}
|
@ -1,75 +0,0 @@
|
||||
import {
|
||||
Button,
|
||||
Dialog,
|
||||
DialogActions,
|
||||
DialogContent,
|
||||
DialogTitle,
|
||||
List,
|
||||
ListItem,
|
||||
ListItemAvatar,
|
||||
ListItemButton,
|
||||
ListItemText,
|
||||
} from "@mui/material";
|
||||
import React from "react";
|
||||
import { ISOCatalogEntry, IsoFilesApi } from "../api/IsoFilesApi";
|
||||
import { AsyncWidget } from "../widgets/AsyncWidget";
|
||||
|
||||
export function IsoCatalogDialog(p: {
|
||||
open: boolean;
|
||||
onClose: () => void;
|
||||
}): React.ReactElement {
|
||||
const [catalog, setCatalog] = React.useState<ISOCatalogEntry[] | undefined>();
|
||||
|
||||
const load = async () => {
|
||||
setCatalog(await IsoFilesApi.Catalog());
|
||||
};
|
||||
|
||||
return (
|
||||
<Dialog open={p.open} onClose={p.onClose}>
|
||||
<DialogTitle>ISO catalog</DialogTitle>
|
||||
<DialogContent>
|
||||
<AsyncWidget
|
||||
loadKey={1}
|
||||
load={load}
|
||||
errMsg="Failed to load catalog"
|
||||
build={() => <IsoCatalogDialogInner catalog={catalog!} />}
|
||||
/>
|
||||
</DialogContent>
|
||||
<DialogActions>
|
||||
<Button autoFocus onClick={p.onClose}>
|
||||
Close
|
||||
</Button>
|
||||
</DialogActions>
|
||||
</Dialog>
|
||||
);
|
||||
}
|
||||
|
||||
export function IsoCatalogDialogInner(p: {
|
||||
catalog: ISOCatalogEntry[];
|
||||
}): React.ReactElement {
|
||||
return (
|
||||
<List dense>
|
||||
{p.catalog.map((entry) => (
|
||||
<a
|
||||
key={entry.name}
|
||||
href={entry.url}
|
||||
target="_blank"
|
||||
rel="noopener"
|
||||
style={{ color: "inherit", textDecoration: "none" }}
|
||||
>
|
||||
<ListItem>
|
||||
<ListItemButton>
|
||||
<ListItemAvatar>
|
||||
<img
|
||||
src={IsoFilesApi.CatalogImageURL(entry)}
|
||||
style={{ width: "2em" }}
|
||||
/>
|
||||
</ListItemAvatar>
|
||||
<ListItemText primary={entry.name} />
|
||||
</ListItemButton>
|
||||
</ListItem>
|
||||
</a>
|
||||
))}
|
||||
</List>
|
||||
);
|
||||
}
|
@ -39,7 +39,7 @@ export function AlertDialogProvider(p: PropsWithChildren): React.ReactElement {

  return (
    <>
      <AlertContextK value={hook}>{p.children}</AlertContextK>
      <AlertContextK.Provider value={hook}>{p.children}</AlertContextK.Provider>

      <Dialog
        open={open}
@ -67,5 +67,5 @@ export function AlertDialogProvider(p: PropsWithChildren): React.ReactElement {
}

export function useAlert(): AlertContext {
  return React.use(AlertContextK)!;
  return React.useContext(AlertContextK)!;
}
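One side of this hunk renders the context object directly and reads it with React.use, which assumes React 19; the other keeps the Context.Provider / useContext pair. A minimal sketch of the newer pattern, with window.alert standing in for the MUI dialog handled by the real provider:

```tsx
// Minimal sketch assuming React 19 (context rendered directly, read via React.use).
import React, { PropsWithChildren } from "react";

type AlertContext = (message: string) => void;
const AlertContextK = React.createContext<AlertContext | null>(null);

export function TinyAlertProvider(p: PropsWithChildren): React.ReactElement {
  // window.alert stands in for the dialog state handled by the real provider
  const hook: AlertContext = (message) => window.alert(message);
  return <AlertContextK value={hook}>{p.children}</AlertContextK>;
}

export function useAlert(): AlertContext {
  // React.use replaces React.useContext on the newer code path
  return React.use(AlertContextK)!;
}
```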
@ -59,13 +59,13 @@ export function ConfirmDialogProvider(

  return (
    <>
      <ConfirmContextK value={hook}>
      <ConfirmContextK.Provider value={hook}>
        {p.children}
      </ConfirmContextK>
      </ConfirmContextK.Provider>

      <Dialog
        open={open}
        onClose={() => { handleClose(false); }}
        onClose={() => handleClose(false)}
        aria-labelledby="alert-dialog-title"
        aria-describedby="alert-dialog-description"
      >
@ -76,10 +76,10 @@ export function ConfirmDialogProvider(
          </DialogContentText>
        </DialogContent>
        <DialogActions>
          <Button onClick={() => { handleClose(false); }} autoFocus>
          <Button onClick={() => handleClose(false)} autoFocus>
            {cancelButton ?? "Cancel"}
          </Button>
          <Button onClick={() => { handleClose(true); }} color="error">
          <Button onClick={() => handleClose(true)} color="error">
            {confirmButton ?? "Confirm"}
          </Button>
        </DialogActions>
@ -89,5 +89,5 @@ export function ConfirmDialogProvider(
}

export function useConfirm(): ConfirmContext {
  return React.use(ConfirmContextK)!;
  return React.useContext(ConfirmContextK)!;
}
@ -6,10 +6,10 @@ import {
} from "@mui/material";
import React, { PropsWithChildren } from "react";

interface LoadingMessageContext {
type LoadingMessageContext = {
  show: (message: string) => void;
  hide: () => void;
}
};

const LoadingMessageContextK =
  React.createContext<LoadingMessageContext | null>(null);
@ -34,9 +34,9 @@ export function LoadingMessageProvider(

  return (
    <>
      <LoadingMessageContextK value={hook}>
      <LoadingMessageContextK.Provider value={hook}>
        {p.children}
      </LoadingMessageContextK>
      </LoadingMessageContextK.Provider>

      <Dialog open={open}>
        <DialogContent>
@ -60,5 +60,5 @@ export function LoadingMessageProvider(
}

export function useLoadingMessage(): LoadingMessageContext {
  return React.use(LoadingMessageContextK)!;
  return React.useContext(LoadingMessageContextK)!;
}
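Consumers of this provider follow a show / await / hide-in-finally pattern (see the convert and backup dialogs elsewhere in this diff). A small wrapper-hook sketch; the hook name and import path are illustrative:

```typescript
// Sketch: useLoadingMessage is the hook defined above; the path is an assumption.
import { useLoadingMessage } from "./LoadingMessageProvider";

export function useWithLoadingMessage() {
  const loadingMessage = useLoadingMessage();

  // Run an async operation behind the blocking message, always hiding it again
  return async function runWithMessage<T>(
    message: string,
    work: () => Promise<T>
  ): Promise<T> {
    loadingMessage.show(message);
    try {
      return await work();
    } finally {
      loadingMessage.hide();
    }
  };
}
```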
@ -24,9 +24,9 @@ export function SnackbarProvider(p: PropsWithChildren): React.ReactElement {

  return (
    <>
      <SnackbarContextK value={hook}>
      <SnackbarContextK.Provider value={hook}>
        {p.children}
      </SnackbarContextK>
      </SnackbarContextK.Provider>

      <Snackbar
        open={open}
@ -39,5 +39,5 @@ export function SnackbarProvider(p: PropsWithChildren): React.ReactElement {
}

export function useSnackbar(): SnackbarContext {
  return React.use(SnackbarContextK)!;
  return React.useContext(SnackbarContextK)!;
}
@ -3,44 +3,17 @@ import "@fontsource/roboto/400.css";
import "@fontsource/roboto/500.css";
import "@fontsource/roboto/700.css";

import { ThemeProvider, createTheme } from "@mui/material";
import React from "react";
import ReactDOM from "react-dom/client";
import { App } from "./App";
import { AlertDialogProvider } from "./hooks/providers/AlertDialogProvider";
import { ConfirmDialogProvider } from "./hooks/providers/ConfirmDialogProvider";
import { LoadingMessageProvider } from "./hooks/providers/LoadingMessageProvider";
import { SnackbarProvider } from "./hooks/providers/SnackbarProvider";
import "./index.css";
import reportWebVitals from "./reportWebVitals";
import { LoadServerConfig } from "./widgets/LoadServerConfig";

import { loader } from "@monaco-editor/react";
import * as monaco from "monaco-editor";
import EditorWorker from "monaco-editor/esm/vs/editor/editor.worker?worker";
import { configureMonacoYaml } from "monaco-yaml";
import YamlWorker from "monaco-yaml/yaml.worker?worker";

// This allows to use a self hosted instance of Monaco editor
loader.config({ monaco });

// Add YAML support to Monaco
configureMonacoYaml(monaco, {
  enableSchemaRequest: false,
});

/// YAML worker
window.MonacoEnvironment = {
  getWorker(_moduleId, label) {
    switch (label) {
      case "editorWorkerService":
        return new EditorWorker();
      case "yaml":
        return new YamlWorker();
      default:
        throw new Error(`Unknown label ${label}`);
    }
  },
};
import { ThemeProvider, createTheme } from "@mui/material";
import { LoadingMessageProvider } from "./hooks/providers/LoadingMessageProvider";
import { AlertDialogProvider } from "./hooks/providers/AlertDialogProvider";
import { SnackbarProvider } from "./hooks/providers/SnackbarProvider";
import { ConfirmDialogProvider } from "./hooks/providers/ConfirmDialogProvider";

const darkTheme = createTheme({
  palette: {
@ -48,7 +21,9 @@ const darkTheme = createTheme({
  },
});

const root = ReactDOM.createRoot(document.getElementById("root")!);
const root = ReactDOM.createRoot(
  document.getElementById("root") as HTMLElement
);
root.render(
  <React.StrictMode>
    <ThemeProvider theme={darkTheme}>
@ -66,3 +41,8 @@ root.render(
    </ThemeProvider>
  </React.StrictMode>
);

// If you want to start measuring performance in your app, pass a function
// to log results (for example: reportWebVitals(console.log))
// or send to an analytics endpoint. Learn more: https://bit.ly/CRA-vitals
reportWebVitals();
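One side of this hunk wires a self-hosted Monaco instance plus a YAML worker; with that bootstrap in place, a YAML editor can be rendered through @monaco-editor/react. A sketch under the assumption of that library's Editor component (the component and prop names come from its public API, not from this repository):

```tsx
// Sketch only: relies on the Editor component exported by @monaco-editor/react.
import Editor from "@monaco-editor/react";
import React from "react";

export function YamlEditor(p: {
  value: string;
  onChange: (v: string) => void;
}): React.ReactElement {
  return (
    <Editor
      height="20em"
      defaultLanguage="yaml"
      value={p.value}
      onChange={(v) => p.onChange(v ?? "")}
    />
  );
}
```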
15
virtweb_frontend/src/reportWebVitals.ts
Normal file
@ -0,0 +1,15 @@
import { ReportHandler } from 'web-vitals';

const reportWebVitals = (onPerfEntry?: ReportHandler) => {
  if (onPerfEntry && onPerfEntry instanceof Function) {
    import('web-vitals').then(({ getCLS, getFID, getFCP, getLCP, getTTFB }) => {
      getCLS(onPerfEntry);
      getFID(onPerfEntry);
      getFCP(onPerfEntry);
      getLCP(onPerfEntry);
      getTTFB(onPerfEntry);
    });
  }
};

export default reportWebVitals;
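As the comment in index.tsx suggests, the helper is activated by passing it a handler. Two hedged examples (the /analytics endpoint is a placeholder, not part of this project):

```typescript
import reportWebVitals from "./reportWebVitals";

// Log each metric to the console during development
reportWebVitals(console.log);

// Or ship metrics to an analytics endpoint (placeholder URL)
reportWebVitals((metric) => {
  navigator.sendBeacon("/analytics", JSON.stringify(metric));
});
```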
@ -1,417 +0,0 @@
|
||||
import DeleteIcon from "@mui/icons-material/Delete";
|
||||
import DownloadIcon from "@mui/icons-material/Download";
|
||||
import LoopIcon from "@mui/icons-material/Loop";
|
||||
import MoreVertIcon from "@mui/icons-material/MoreVert";
|
||||
import RefreshIcon from "@mui/icons-material/Refresh";
|
||||
import {
|
||||
Alert,
|
||||
Button,
|
||||
CircularProgress,
|
||||
IconButton,
|
||||
LinearProgress,
|
||||
ListItemIcon,
|
||||
ListItemText,
|
||||
Menu,
|
||||
MenuItem,
|
||||
Tooltip,
|
||||
Typography,
|
||||
} from "@mui/material";
|
||||
import { DataGrid, GridColDef } from "@mui/x-data-grid";
|
||||
import { filesize } from "filesize";
|
||||
import React from "react";
|
||||
import { DiskImage, DiskImageApi } from "../api/DiskImageApi";
|
||||
import { ServerApi } from "../api/ServerApi";
|
||||
import { ConvertDiskImageDialog } from "../dialogs/ConvertDiskImageDialog";
|
||||
import { useAlert } from "../hooks/providers/AlertDialogProvider";
|
||||
import { useConfirm } from "../hooks/providers/ConfirmDialogProvider";
|
||||
import { useLoadingMessage } from "../hooks/providers/LoadingMessageProvider";
|
||||
import { useSnackbar } from "../hooks/providers/SnackbarProvider";
|
||||
import { downloadBlob } from "../utils/FilesUtils";
|
||||
import { AsyncWidget } from "../widgets/AsyncWidget";
|
||||
import { DateWidget } from "../widgets/DateWidget";
|
||||
import { FileInput } from "../widgets/forms/FileInput";
|
||||
import { VirtWebPaper } from "../widgets/VirtWebPaper";
|
||||
import { VirtWebRouteContainer } from "../widgets/VirtWebRouteContainer";
|
||||
|
||||
export function DiskImagesRoute(): React.ReactElement {
|
||||
const [list, setList] = React.useState<DiskImage[] | undefined>();
|
||||
|
||||
const loadKey = React.useRef(1);
|
||||
|
||||
const load = async () => {
|
||||
setList(await DiskImageApi.GetList());
|
||||
};
|
||||
|
||||
const reload = () => {
|
||||
loadKey.current += 1;
|
||||
setList(undefined);
|
||||
};
|
||||
|
||||
return (
|
||||
<VirtWebRouteContainer
|
||||
label="Disk images management"
|
||||
actions={
|
||||
<span>
|
||||
<Tooltip title="Refresh Disk images list">
|
||||
<IconButton onClick={reload}>
|
||||
<RefreshIcon />
|
||||
</IconButton>
|
||||
</Tooltip>
|
||||
</span>
|
||||
}
|
||||
>
|
||||
<AsyncWidget
|
||||
loadKey={loadKey.current}
|
||||
errMsg="Failed to load disk images list!"
|
||||
load={load}
|
||||
ready={list !== undefined}
|
||||
build={() => (
|
||||
<>
|
||||
<UploadDiskImageCard onFileUploaded={reload} />
|
||||
<DiskImageList list={list!} onReload={reload} />
|
||||
</>
|
||||
)}
|
||||
/>
|
||||
</VirtWebRouteContainer>
|
||||
);
|
||||
}
|
||||
|
||||
function UploadDiskImageCard(p: {
|
||||
onFileUploaded: () => void;
|
||||
}): React.ReactElement {
|
||||
const alert = useAlert();
|
||||
const snackbar = useSnackbar();
|
||||
|
||||
const [value, setValue] = React.useState<File | null>(null);
|
||||
const [uploadProgress, setUploadProgress] = React.useState<number | null>(
|
||||
null
|
||||
);
|
||||
|
||||
const handleChange = (newValue: File | null) => {
|
||||
if (
|
||||
newValue &&
|
||||
newValue.size > ServerApi.Config.constraints.disk_image_max_size
|
||||
) {
|
||||
alert(
|
||||
`The file is too big (max size allowed: ${filesize(
|
||||
ServerApi.Config.constraints.disk_image_max_size
|
||||
)}`
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
if (
|
||||
newValue &&
|
||||
newValue.type.length > 0 &&
|
||||
!ServerApi.Config.disk_images_mimetypes.includes(newValue.type)
|
||||
) {
|
||||
alert(`Selected file mimetype is not allowed! (${newValue.type})`);
|
||||
return;
|
||||
}
|
||||
|
||||
setValue(newValue);
|
||||
};
|
||||
|
||||
const upload = async () => {
|
||||
try {
|
||||
setUploadProgress(0);
|
||||
await DiskImageApi.Upload(value!, setUploadProgress);
|
||||
|
||||
setValue(null);
|
||||
snackbar("The file was successfully uploaded!");
|
||||
|
||||
p.onFileUploaded();
|
||||
} catch (e) {
|
||||
console.error(e);
|
||||
await alert(`Failed to perform file upload! ${e}`);
|
||||
}
|
||||
|
||||
setUploadProgress(null);
|
||||
};
|
||||
|
||||
if (uploadProgress !== null) {
|
||||
return (
|
||||
<VirtWebPaper label="File upload" noHorizontalMargin>
|
||||
<Typography variant="body1">
|
||||
Upload in progress ({Math.floor(uploadProgress * 100)}%)...
|
||||
</Typography>
|
||||
<LinearProgress variant="determinate" value={uploadProgress * 100} />
|
||||
</VirtWebPaper>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<VirtWebPaper label="Disk image upload" noHorizontalMargin>
|
||||
<div style={{ display: "flex", alignItems: "center" }}>
|
||||
<FileInput
|
||||
value={value}
|
||||
onChange={handleChange}
|
||||
style={{ flex: 1 }}
|
||||
slotProps={{
|
||||
htmlInput: {
|
||||
accept: ServerApi.Config.disk_images_mimetypes.join(","),
|
||||
},
|
||||
}}
|
||||
/>
|
||||
|
||||
{value && <Button onClick={upload}>Upload</Button>}
|
||||
</div>
|
||||
</VirtWebPaper>
|
||||
);
|
||||
}
|
||||
|
||||
function DiskImageList(p: {
|
||||
list: DiskImage[];
|
||||
onReload: () => void;
|
||||
}): React.ReactElement {
|
||||
const alert = useAlert();
|
||||
const snackbar = useSnackbar();
|
||||
const confirm = useConfirm();
|
||||
const loadingMessage = useLoadingMessage();
|
||||
|
||||
const [dlProgress, setDlProgress] = React.useState<undefined | number>();
|
||||
|
||||
const [currConversion, setCurrConversion] = React.useState<
|
||||
DiskImage | undefined
|
||||
>();
|
||||
|
||||
// Download disk image file
|
||||
const downloadDiskImage = async (entry: DiskImage) => {
|
||||
setDlProgress(0);
|
||||
|
||||
try {
|
||||
const blob = await DiskImageApi.Download(entry, setDlProgress);
|
||||
|
||||
downloadBlob(blob, entry.file_name);
|
||||
} catch (e) {
|
||||
console.error(e);
|
||||
alert(`Failed to download disk image file! ${e}`);
|
||||
}
|
||||
|
||||
setDlProgress(undefined);
|
||||
};
|
||||
|
||||
// Convert disk image file
|
||||
const convertDiskImage = (entry: DiskImage) => {
|
||||
setCurrConversion(entry);
|
||||
};
|
||||
|
||||
// Delete disk image
|
||||
const deleteDiskImage = async (entry: DiskImage) => {
|
||||
if (
|
||||
!(await confirm(
|
||||
`Do you really want to delete this disk image (${entry.file_name}) ?`
|
||||
))
|
||||
)
|
||||
return;
|
||||
|
||||
loadingMessage.show("Deleting disk image file...");
|
||||
|
||||
try {
|
||||
await DiskImageApi.Delete(entry);
|
||||
snackbar("The disk image has been successfully deleted!");
|
||||
p.onReload();
|
||||
} catch (e) {
|
||||
console.error(e);
|
||||
alert(`Failed to delete disk image!\n${e}`);
|
||||
}
|
||||
|
||||
loadingMessage.hide();
|
||||
};
|
||||
|
||||
if (p.list.length === 0)
|
||||
return (
|
||||
<Typography variant="body1" style={{ textAlign: "center" }}>
|
||||
No disk image uploaded for now.
|
||||
</Typography>
|
||||
);
|
||||
|
||||
const columns: GridColDef<(typeof p.list)[number]>[] = [
|
||||
{ field: "file_name", headerName: "File name", flex: 3, editable: true },
|
||||
{
|
||||
field: "format",
|
||||
headerName: "Format",
|
||||
flex: 1,
|
||||
renderCell(params) {
|
||||
let content = params.row.format;
|
||||
|
||||
if (params.row.format === "Raw") {
|
||||
content += params.row.is_sparse ? " (Sparse)" : " (Fixed)";
|
||||
}
|
||||
|
||||
return content;
|
||||
},
|
||||
},
|
||||
{
|
||||
field: "file_size",
|
||||
headerName: "File size",
|
||||
flex: 1,
|
||||
renderCell(params) {
|
||||
let res = filesize(params.row.file_size);
|
||||
|
||||
if (params.row.format === "QCow2") {
|
||||
res += ` (${filesize(params.row.virtual_size!)})`;
|
||||
}
|
||||
|
||||
return res;
|
||||
},
|
||||
},
|
||||
{
|
||||
field: "created",
|
||||
headerName: "Created",
|
||||
flex: 1,
|
||||
renderCell(params) {
|
||||
return <DateWidget time={params.row.created} />;
|
||||
},
|
||||
},
|
||||
{
|
||||
field: "actions",
|
||||
type: "actions",
|
||||
headerName: "",
|
||||
width: 55,
|
||||
cellClassName: "actions",
|
||||
editable: false,
|
||||
getActions: (params) => {
|
||||
return [
|
||||
<DiskImageActionMenu
|
||||
key="menu"
|
||||
diskImage={params.row}
|
||||
onDownload={downloadDiskImage}
|
||||
onConvert={convertDiskImage}
|
||||
onDelete={deleteDiskImage}
|
||||
/>,
|
||||
];
|
||||
},
|
||||
},
|
||||
];
|
||||
|
||||
return (
|
||||
<>
|
||||
{/* Download notification */}
|
||||
{dlProgress !== undefined && (
|
||||
<Alert severity="info">
|
||||
<div
|
||||
style={{
|
||||
display: "flex",
|
||||
flexDirection: "row",
|
||||
alignItems: "center",
|
||||
overflow: "hidden",
|
||||
}}
|
||||
>
|
||||
<Typography variant="body1">
|
||||
Downloading... {dlProgress}%
|
||||
</Typography>
|
||||
<CircularProgress
|
||||
variant="determinate"
|
||||
size={"1.5rem"}
|
||||
style={{ marginLeft: "10px" }}
|
||||
value={dlProgress}
|
||||
/>
|
||||
</div>
|
||||
</Alert>
|
||||
)}
|
||||
|
||||
{/* Disk image conversion dialog */}
|
||||
{currConversion && (
|
||||
<ConvertDiskImageDialog
|
||||
image={currConversion}
|
||||
onCancel={() => {
|
||||
setCurrConversion(undefined);
|
||||
}}
|
||||
onFinished={() => {
|
||||
setCurrConversion(undefined);
|
||||
p.onReload();
|
||||
}}
|
||||
/>
|
||||
)}
|
||||
|
||||
{/* The table itself */}
|
||||
<DataGrid<DiskImage>
|
||||
getRowId={(c) => c.file_name}
|
||||
rows={p.list}
|
||||
columns={columns}
|
||||
processRowUpdate={async (n, o) => {
|
||||
try {
|
||||
await DiskImageApi.Rename(o, n.file_name);
|
||||
return n;
|
||||
} catch (e) {
|
||||
console.error("Failed to rename disk image!", e);
|
||||
alert(`Failed to rename disk image! ${e}`);
|
||||
throw e;
|
||||
} finally {
|
||||
p.onReload();
|
||||
}
|
||||
}}
|
||||
/>
|
||||
</>
|
||||
);
|
||||
}
|
||||
|
||||
function DiskImageActionMenu(p: {
|
||||
diskImage: DiskImage;
|
||||
onDownload: (d: DiskImage) => void;
|
||||
onConvert: (d: DiskImage) => void;
|
||||
onDelete: (d: DiskImage) => void;
|
||||
}): React.ReactElement {
|
||||
const [anchorEl, setAnchorEl] = React.useState<null | HTMLElement>(null);
|
||||
const open = Boolean(anchorEl);
|
||||
const handleClick = (event: React.MouseEvent<HTMLElement>) => {
|
||||
setAnchorEl(event.currentTarget);
|
||||
};
|
||||
const handleClose = () => {
|
||||
setAnchorEl(null);
|
||||
};
|
||||
return (
|
||||
<>
|
||||
<IconButton
|
||||
aria-label="Actions"
|
||||
aria-haspopup="true"
|
||||
onClick={handleClick}
|
||||
>
|
||||
<MoreVertIcon />
|
||||
</IconButton>
|
||||
<Menu anchorEl={anchorEl} open={open} onClose={handleClose}>
|
||||
{/* Download disk image */}
|
||||
<MenuItem
|
||||
onClick={() => {
|
||||
handleClose();
|
||||
p.onDownload(p.diskImage);
|
||||
}}
|
||||
>
|
||||
<ListItemIcon>
|
||||
<DownloadIcon />
|
||||
</ListItemIcon>
|
||||
<ListItemText secondary={"Download disk image"}>
|
||||
Download
|
||||
</ListItemText>
|
||||
</MenuItem>
|
||||
|
||||
{/* Convert disk image */}
|
||||
<MenuItem
|
||||
onClick={() => {
|
||||
handleClose();
|
||||
p.onConvert(p.diskImage);
|
||||
}}
|
||||
>
|
||||
<ListItemIcon>
|
||||
<LoopIcon />
|
||||
</ListItemIcon>
|
||||
<ListItemText secondary={"Convert disk image"}>Convert</ListItemText>
|
||||
</MenuItem>
|
||||
|
||||
{/* Delete disk image */}
|
||||
<MenuItem
|
||||
onClick={() => {
|
||||
handleClose();
|
||||
p.onDelete(p.diskImage);
|
||||
}}
|
||||
>
|
||||
<ListItemIcon>
|
||||
<DeleteIcon color="error" />
|
||||
</ListItemIcon>
|
||||
<ListItemText secondary={"Delete disk image"}>Delete</ListItemText>
|
||||
</MenuItem>
|
||||
</Menu>
|
||||
</>
|
||||
);
|
||||
}
|
@ -116,7 +116,7 @@ function EditApiTokenRouteInner(p: {
  const [changed, setChanged] = React.useState(false);

  const [, updateState] = React.useState<any>();
  const forceUpdate = React.useCallback(() => { updateState({}); }, []);
  const forceUpdate = React.useCallback(() => updateState({}), []);

  const valueChanged = () => {
    setChanged(true);
@ -99,7 +99,7 @@ function EditNetworkFilterRouteInner(p: {
  const [changed, setChanged] = React.useState(false);

  const [, updateState] = React.useState<any>();
  const forceUpdate = React.useCallback(() => { updateState({}); }, []);
  const forceUpdate = React.useCallback(() => updateState({}), []);

  const valueChanged = () => {
    setChanged(true);
@ -97,7 +97,7 @@ function EditNetworkRouteInner(p: {
  const [changed, setChanged] = React.useState(false);

  const [, updateState] = React.useState<any>();
  const forceUpdate = React.useCallback(() => { updateState({}); }, []);
  const forceUpdate = React.useCallback(() => updateState({}), []);

  const valueChanged = () => {
    setChanged(true);
@ -15,7 +15,7 @@ export function CreateVMRoute(): React.ReactElement {
  const alert = useAlert();
  const navigate = useNavigate();

  const [vm, setVM] = React.useState(VMInfo.NewEmpty());
  const [vm, setVM] = React.useState(VMInfo.NewEmpty);

  const create = async (v: VMInfo) => {
    try {
@ -103,9 +103,7 @@ function EditVMInner(p: {
  const [changed, setChanged] = React.useState(false);

  const [, updateState] = React.useState<any>();
  const forceUpdate = React.useCallback(() => {
    updateState({});
  }, []);
  const forceUpdate = React.useCallback(() => updateState({}), []);

  const valueChanged = () => {
    setChanged(true);
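All of the hunks above only reshape the callback bodies; the underlying technique is the classic force-update trick, where storing a brand-new object identity triggers a re-render. A standalone sketch of the same idea as a reusable hook (the hook name is illustrative):

```typescript
import React from "react";

// Force a re-render by storing a fresh object; the value itself is never read.
export function useForceUpdate(): () => void {
  const [, updateState] = React.useState<object>();
  return React.useCallback(() => {
    updateState({});
  }, []);
}
```

Whether the callback body is written as `() => updateState({})` or `() => { updateState({}); }` makes no functional difference; the diff only reflects a lint-style preference.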
@ -1,7 +1,4 @@
|
||||
import DeleteIcon from "@mui/icons-material/Delete";
|
||||
import DownloadIcon from "@mui/icons-material/Download";
|
||||
import MenuBookIcon from "@mui/icons-material/MenuBook";
|
||||
import RefreshIcon from "@mui/icons-material/Refresh";
|
||||
import {
|
||||
Alert,
|
||||
Button,
|
||||
@ -12,25 +9,24 @@ import {
|
||||
Tooltip,
|
||||
Typography,
|
||||
} from "@mui/material";
|
||||
import DownloadIcon from "@mui/icons-material/Download";
|
||||
import { DataGrid, GridColDef } from "@mui/x-data-grid";
|
||||
import { filesize } from "filesize";
|
||||
import { MuiFileInput } from "mui-file-input";
|
||||
import React from "react";
|
||||
import { IsoFile, IsoFilesApi } from "../api/IsoFilesApi";
|
||||
import { ServerApi } from "../api/ServerApi";
|
||||
import { IsoCatalogDialog } from "../dialogs/IsoCatalogDialog";
|
||||
import { useAlert } from "../hooks/providers/AlertDialogProvider";
|
||||
import { useConfirm } from "../hooks/providers/ConfirmDialogProvider";
|
||||
import { useLoadingMessage } from "../hooks/providers/LoadingMessageProvider";
|
||||
import { useSnackbar } from "../hooks/providers/SnackbarProvider";
|
||||
import { downloadBlob } from "../utils/FilesUtils";
|
||||
import { AsyncWidget } from "../widgets/AsyncWidget";
|
||||
import { FileInput } from "../widgets/forms/FileInput";
|
||||
import { VirtWebPaper } from "../widgets/VirtWebPaper";
|
||||
import { VirtWebRouteContainer } from "../widgets/VirtWebRouteContainer";
|
||||
import { useConfirm } from "../hooks/providers/ConfirmDialogProvider";
|
||||
import { downloadBlob } from "../utils/FilesUtils";
|
||||
|
||||
export function IsoFilesRoute(): React.ReactElement {
|
||||
const [list, setList] = React.useState<IsoFile[] | undefined>();
|
||||
const [isoCatalog, setIsoCatalog] = React.useState(false);
|
||||
|
||||
const loadKey = React.useRef(1);
|
||||
|
||||
@ -44,41 +40,19 @@ export function IsoFilesRoute(): React.ReactElement {
|
||||
};
|
||||
|
||||
return (
|
||||
<>
|
||||
<AsyncWidget
|
||||
loadKey={loadKey.current}
|
||||
errMsg="Failed to load ISO files list!"
|
||||
load={load}
|
||||
ready={list !== undefined}
|
||||
build={() => (
|
||||
<VirtWebRouteContainer
|
||||
label="ISO files management"
|
||||
actions={
|
||||
<span>
|
||||
<Tooltip title="Open the ISO catalog">
|
||||
<IconButton onClick={() => { setIsoCatalog(true); }}>
|
||||
<MenuBookIcon />
|
||||
</IconButton>
|
||||
</Tooltip>
|
||||
<Tooltip title="Refresh ISO list">
|
||||
<IconButton onClick={reload}>
|
||||
<RefreshIcon />
|
||||
</IconButton>
|
||||
</Tooltip>
|
||||
</span>
|
||||
}
|
||||
>
|
||||
<UploadIsoFileCard onFileUploaded={reload} />
|
||||
<UploadIsoFileFromUrlCard onFileUploaded={reload} />
|
||||
<IsoFilesList list={list!} onReload={reload} />
|
||||
</VirtWebRouteContainer>
|
||||
)}
|
||||
/>
|
||||
<IsoCatalogDialog
|
||||
open={isoCatalog}
|
||||
onClose={() => { setIsoCatalog(false); }}
|
||||
/>
|
||||
</>
|
||||
<AsyncWidget
|
||||
loadKey={loadKey.current}
|
||||
errMsg="Failed to load ISO files list!"
|
||||
load={load}
|
||||
ready={list !== undefined}
|
||||
build={() => (
|
||||
<VirtWebRouteContainer label="ISO files management">
|
||||
<UploadIsoFileCard onFileUploaded={reload} />
|
||||
<UploadIsoFileFromUrlCard onFileUploaded={reload} />
|
||||
<IsoFilesList list={list!} onReload={reload} />
|
||||
</VirtWebRouteContainer>
|
||||
)}
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
||||
@ -122,7 +96,7 @@ function UploadIsoFileCard(p: {
|
||||
p.onFileUploaded();
|
||||
} catch (e) {
|
||||
console.error(e);
|
||||
await alert(`Failed to perform file upload! ${e}`);
|
||||
await alert("Failed to perform file upload! " + e);
|
||||
}
|
||||
|
||||
setUploadProgress(null);
|
||||
@ -130,7 +104,7 @@ function UploadIsoFileCard(p: {
|
||||
|
||||
if (uploadProgress !== null) {
|
||||
return (
|
||||
<VirtWebPaper label="File upload" noHorizontalMargin>
|
||||
<VirtWebPaper label="File upload">
|
||||
<Typography variant="body1">
|
||||
Upload in progress ({Math.floor(uploadProgress * 100)}%)...
|
||||
</Typography>
|
||||
@ -140,15 +114,13 @@ function UploadIsoFileCard(p: {
|
||||
}
|
||||
|
||||
return (
|
||||
<VirtWebPaper label="File upload" noHorizontalMargin>
|
||||
<VirtWebPaper label="File upload">
|
||||
<div style={{ display: "flex", alignItems: "center" }}>
|
||||
<FileInput
|
||||
<MuiFileInput
|
||||
value={value}
|
||||
onChange={handleChange}
|
||||
style={{ flex: 1 }}
|
||||
slotProps={{
|
||||
htmlInput: { accept: ServerApi.Config.iso_mimetypes.join(",") },
|
||||
}}
|
||||
inputProps={{ accept: ServerApi.Config.iso_mimetypes.join(",") }}
|
||||
/>
|
||||
|
||||
{value && <Button onClick={upload}>Upload file</Button>}
|
||||
@ -175,8 +147,6 @@ function UploadIsoFileFromUrlCard(p: {
|
||||
loadingMessage.show("Downloading file from URL...");
|
||||
await IsoFilesApi.UploadFromURL(url, actualFileName);
|
||||
|
||||
p.onFileUploaded();
|
||||
|
||||
setURL("");
|
||||
setFilename(null);
|
||||
snackbar("Successfully downloaded file!");
|
||||
@ -188,24 +158,20 @@ function UploadIsoFileFromUrlCard(p: {
|
||||
};
|
||||
|
||||
return (
|
||||
<VirtWebPaper label="File upload from URL" noHorizontalMargin>
|
||||
<VirtWebPaper label="File upload from URL">
|
||||
<div style={{ display: "flex", alignItems: "center" }}>
|
||||
<TextField
|
||||
label="URL"
|
||||
value={url}
|
||||
style={{ flex: 3 }}
|
||||
onChange={(e) => {
|
||||
setURL(e.target.value);
|
||||
}}
|
||||
onChange={(e) => setURL(e.target.value)}
|
||||
/>
|
||||
<span style={{ width: "10px" }}></span>
|
||||
<TextField
|
||||
label="Filename"
|
||||
value={actualFileName}
|
||||
style={{ flex: 2 }}
|
||||
onChange={(e) => {
|
||||
setFilename(e.target.value);
|
||||
}}
|
||||
onChange={(e) => setFilename(e.target.value)}
|
||||
/>
|
||||
{url !== "" && actualFileName !== "" && (
|
||||
<Button onClick={upload}>Upload file</Button>
|
||||
@ -232,7 +198,7 @@ function IsoFilesList(p: {
|
||||
try {
|
||||
const blob = await IsoFilesApi.Download(entry, setDlProgress);
|
||||
|
||||
downloadBlob(blob, entry.filename);
|
||||
await downloadBlob(blob, entry.filename);
|
||||
} catch (e) {
|
||||
console.error(e);
|
||||
alert("Failed to download iso file!");
|
||||
@ -270,7 +236,7 @@ function IsoFilesList(p: {
|
||||
</Typography>
|
||||
);
|
||||
|
||||
const columns: GridColDef<IsoFile>[] = [
|
||||
const columns: GridColDef[] = [
|
||||
{ field: "filename", headerName: "File name", flex: 3 },
|
||||
{
|
||||
field: "size",
|
||||
@ -305,31 +271,39 @@ function IsoFilesList(p: {
|
||||
|
||||
return (
|
||||
<>
|
||||
{/* Download notification */}
|
||||
{dlProgress !== undefined && (
|
||||
<Alert severity="info">
|
||||
<div
|
||||
style={{
|
||||
display: "flex",
|
||||
flexDirection: "row",
|
||||
alignItems: "center",
|
||||
overflow: "hidden",
|
||||
}}
|
||||
>
|
||||
<Typography variant="body1">
|
||||
Downloading... {dlProgress}%
|
||||
</Typography>
|
||||
<CircularProgress
|
||||
variant="determinate"
|
||||
size={"1.5rem"}
|
||||
style={{ marginLeft: "10px" }}
|
||||
value={dlProgress}
|
||||
/>
|
||||
</div>
|
||||
</Alert>
|
||||
)}
|
||||
{/* ISO files list table */}
|
||||
<DataGrid getRowId={(c) => c.filename} rows={p.list} columns={columns} />
|
||||
<VirtWebPaper label="Files list">
|
||||
{/* Download notification */}
|
||||
{dlProgress !== undefined && (
|
||||
<Alert severity="info">
|
||||
<div
|
||||
style={{
|
||||
display: "flex",
|
||||
flexDirection: "row",
|
||||
alignItems: "center",
|
||||
overflow: "hidden",
|
||||
}}
|
||||
>
|
||||
<Typography variant="body1">
|
||||
Downloading... {dlProgress}%
|
||||
</Typography>
|
||||
<CircularProgress
|
||||
variant="determinate"
|
||||
size={"1.5rem"}
|
||||
style={{ marginLeft: "10px" }}
|
||||
value={dlProgress}
|
||||
/>
|
||||
</div>
|
||||
</Alert>
|
||||
)}
|
||||
|
||||
{/* Files list table */}
|
||||
<DataGrid
|
||||
getRowId={(c) => c.filename}
|
||||
rows={p.list}
|
||||
columns={columns}
|
||||
autoHeight={true}
|
||||
/>
|
||||
</VirtWebPaper>
|
||||
</>
|
||||
);
|
||||
}
|
||||
|
@ -66,7 +66,7 @@ function NetworkFiltersListRouteInner(p: {
    const onlyBuiltin = visibleFilters === VisibleFilters.Builtin;

    return p.list.filter((f) => NWFilterIsBuiltin(f) === onlyBuiltin);
  }, [visibleFilters, p.list]);
  }, [visibleFilters]);

  return (
    <VirtWebRouteContainer
@ -78,9 +78,7 @@ function NetworkFiltersListRouteInner(p: {
            size="small"
            value={visibleFilters}
            exclusive
            onChange={(_ev, v) => {
              setVisibleFilters(v);
            }}
            onChange={(_ev, v) => setVisibleFilters(v)}
            aria-label="visible filters"
          >
            <ToggleButton value={VisibleFilters.All}>All</ToggleButton>
@ -132,8 +130,8 @@ function NetworkFiltersListRouteInner(p: {
                </TableCell>
                <TableCell>
                  <ul>
                    {t.join_filters.map((f) => (
                      <li key={f}>{f}</li>
                    {t.join_filters.map((f, n) => (
                      <li key={n}>{f}</li>
                    ))}
                  </ul>
                </TableCell>
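The first hunk above adds p.list to the useMemo dependency array; without it, the memoized filter result would keep returning the old list after a reload. A reduced sketch of the same fix:

```typescript
import React from "react";

// Recompute the visible subset whenever the toggle or the incoming list changes.
function useVisibleItems<T>(
  list: T[],
  onlyBuiltin: boolean,
  isBuiltin: (item: T) => boolean
): T[] {
  return React.useMemo(
    () => list.filter((f) => isBuiltin(f) === onlyBuiltin),
    // Listing `list` here avoids serving a stale result after the data reloads
    [list, onlyBuiltin, isBuiltin]
  );
}
```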
@ -3,15 +3,7 @@ import { RouterLink } from "../widgets/RouterLink";

export function NotFoundRoute(): React.ReactElement {
  return (
    <div
      style={{
        textAlign: "center",
        flex: 1,
        justifyContent: "center",
        display: "flex",
        flexDirection: "column",
      }}
    >
    <div style={{ textAlign: "center" }}>
      <h1>404 Not found</h1>
      <p>The page you requested was not found!</p>
      <RouterLink to="/">
@ -1,4 +1,3 @@
|
||||
/* eslint-disable react-x/no-array-index-key */
|
||||
import {
|
||||
mdiHarddisk,
|
||||
mdiInformation,
|
||||
@ -9,21 +8,16 @@ import {
|
||||
import Icon from "@mdi/react";
|
||||
import {
|
||||
Box,
|
||||
IconButton,
|
||||
Grid,
|
||||
LinearProgress,
|
||||
Table,
|
||||
TableBody,
|
||||
TableCell,
|
||||
TableHead,
|
||||
TableRow,
|
||||
Tooltip,
|
||||
Typography,
|
||||
} from "@mui/material";
|
||||
import Grid from "@mui/material/Grid";
|
||||
import { PieChart } from "@mui/x-charts";
|
||||
import { filesize } from "filesize";
|
||||
import humanizeDuration from "humanize-duration";
|
||||
import IosShareIcon from "@mui/icons-material/IosShare";
|
||||
import React from "react";
|
||||
import {
|
||||
DiskInfo,
|
||||
@ -34,8 +28,8 @@ import {
|
||||
import { AsyncWidget } from "../widgets/AsyncWidget";
|
||||
import { VirtWebPaper } from "../widgets/VirtWebPaper";
|
||||
import { VirtWebRouteContainer } from "../widgets/VirtWebRouteContainer";
|
||||
import { useLoadingMessage } from "../hooks/providers/LoadingMessageProvider";
|
||||
import { useAlert } from "../hooks/providers/AlertDialogProvider";
|
||||
import humanizeDuration from "humanize-duration";
|
||||
import { filesize } from "filesize";
|
||||
|
||||
export function SysInfoRoute(): React.ReactElement {
|
||||
const [info, setInfo] = React.useState<ServerSystemInfo>();
|
||||
@ -57,23 +51,6 @@ export function SysInfoRoute(): React.ReactElement {
|
||||
export function SysInfoRouteInner(p: {
|
||||
info: ServerSystemInfo;
|
||||
}): React.ReactElement {
|
||||
const alert = useAlert();
|
||||
const loadingMessage = useLoadingMessage();
|
||||
const downloadAllConfig = async () => {
|
||||
try {
|
||||
loadingMessage.show("Downloading server config...");
|
||||
const res = await ServerApi.ExportServerConfigs();
|
||||
|
||||
const url = URL.createObjectURL(res);
|
||||
window.location.href = url;
|
||||
} catch (e) {
|
||||
console.error("Failed to download server config!", e);
|
||||
alert(`Failed to download server config! ${e}`);
|
||||
} finally {
|
||||
loadingMessage.hide();
|
||||
}
|
||||
};
|
||||
|
||||
const sumDiskUsage = p.info.disks.reduce(
|
||||
(prev, disk) => {
|
||||
return {
|
||||
@ -85,19 +62,10 @@ export function SysInfoRouteInner(p: {
|
||||
);
|
||||
|
||||
return (
|
||||
<VirtWebRouteContainer
|
||||
label="Sysinfo"
|
||||
actions={
|
||||
<Tooltip title="Export all server configs">
|
||||
<IconButton onClick={downloadAllConfig}>
|
||||
<IosShareIcon />
|
||||
</IconButton>
|
||||
</Tooltip>
|
||||
}
|
||||
>
|
||||
<VirtWebRouteContainer label="Sysinfo">
|
||||
<Grid container spacing={2}>
|
||||
{/* Memory */}
|
||||
<Grid size={{ xs: 4 }}>
|
||||
<Grid xs={4}>
|
||||
<Box flexGrow={1}>
|
||||
<Typography style={{ textAlign: "center" }}>Memory</Typography>
|
||||
<PieChart
|
||||
@ -129,7 +97,7 @@ export function SysInfoRouteInner(p: {
|
||||
</Grid>
|
||||
|
||||
{/* Disk usage */}
|
||||
<Grid size={{ xs: 4 }}>
|
||||
<Grid xs={4}>
|
||||
<Box flexGrow={1}>
|
||||
<Typography style={{ textAlign: "center" }}>Disk usage</Typography>
|
||||
<PieChart
|
||||
@ -157,7 +125,7 @@ export function SysInfoRouteInner(p: {
|
||||
</Grid>
|
||||
|
||||
{/* CPU usage */}
|
||||
<Grid size={{ xs: 4 }}>
|
||||
<Grid xs={4}>
|
||||
<Box flexGrow={1}>
|
||||
<Typography style={{ textAlign: "center" }}>CPU usage</Typography>
|
||||
<PieChart
|
||||
@ -166,13 +134,13 @@ export function SysInfoRouteInner(p: {
|
||||
data: [
|
||||
{
|
||||
id: 1,
|
||||
value: 100 - p.info.system.global_cpu_usage,
|
||||
value: 100 - p.info.system.global_cpu_info.cpu_usage,
|
||||
label: "Free",
|
||||
},
|
||||
|
||||
{
|
||||
id: 2,
|
||||
value: p.info.system.global_cpu_usage,
|
||||
value: p.info.system.global_cpu_info.cpu_usage,
|
||||
label: "Used",
|
||||
},
|
||||
],
|
||||
@ -212,18 +180,18 @@ export function SysInfoRouteInner(p: {
|
||||
label="CPU info"
|
||||
icon={<Icon size={"1rem"} path={mdiMemory} />}
|
||||
entries={[
|
||||
{ label: "Brand", value: p.info.system.cpus[0].brand },
|
||||
{ label: "Brand", value: p.info.system.global_cpu_info.brand },
|
||||
{
|
||||
label: "Vendor ID",
|
||||
value: p.info.system.cpus[0].vendor_id,
|
||||
value: p.info.system.global_cpu_info.vendor_id,
|
||||
},
|
||||
{
|
||||
label: "CPU usage",
|
||||
value: p.info.system.cpus[0].cpu_usage,
|
||||
value: p.info.system.global_cpu_info.cpu_usage,
|
||||
},
|
||||
{
|
||||
label: "Name",
|
||||
value: p.info.system.cpus[0].name,
|
||||
value: p.info.system.global_cpu_info.name,
|
||||
},
|
||||
{
|
||||
label: "CPU model",
|
||||
@ -268,7 +236,7 @@ export function SysInfoRouteInner(p: {
|
||||
function SysInfoDetailsTable(p: {
|
||||
label: string;
|
||||
icon: React.ReactElement;
|
||||
entries: { label: string; value: string | number }[];
|
||||
entries: Array<{ label: string; value: string | number }>;
|
||||
}): React.ReactElement {
|
||||
return (
|
||||
<VirtWebPaper
|
||||
|
@ -1,4 +1,3 @@
|
||||
/* eslint-disable react-x/no-array-index-key */
|
||||
import VisibilityIcon from "@mui/icons-material/Visibility";
|
||||
import {
|
||||
Button,
|
||||
@ -10,7 +9,6 @@ import {
|
||||
TableContainer,
|
||||
TableHead,
|
||||
TableRow,
|
||||
Typography,
|
||||
} from "@mui/material";
|
||||
import React from "react";
|
||||
import { useNavigate } from "react-router-dom";
|
||||
@ -59,78 +57,70 @@ export function TokensListRouteInner(p: {
|
||||
</RouterLink>
|
||||
}
|
||||
>
|
||||
{p.list.length > 0 && (
|
||||
<TableContainer component={Paper}>
|
||||
<Table>
|
||||
<TableHead>
|
||||
<TableRow>
|
||||
<TableCell>Name</TableCell>
|
||||
<TableCell>Description</TableCell>
|
||||
<TableCell>Created</TableCell>
|
||||
<TableCell>Updated</TableCell>
|
||||
<TableCell>Last used</TableCell>
|
||||
<TableCell>IP restriction</TableCell>
|
||||
<TableCell>Max inactivity</TableCell>
|
||||
<TableCell>Rights</TableCell>
|
||||
<TableCell>Actions</TableCell>
|
||||
</TableRow>
|
||||
</TableHead>
|
||||
<TableBody>
|
||||
{p.list.map((t) => {
|
||||
return (
|
||||
<TableRow
|
||||
key={t.id}
|
||||
hover
|
||||
onDoubleClick={() => navigate(APITokenURL(t))}
|
||||
style={{ backgroundColor: ExpiredAPIToken(t) ? "red" : "" }}
|
||||
>
|
||||
<TableCell>
|
||||
{t.name} {ExpiredAPIToken(t) && <i>(Expired)</i>}
|
||||
</TableCell>
|
||||
<TableCell>{t.description}</TableCell>
|
||||
<TableCell>
|
||||
<TimeWidget time={t.created} />
|
||||
</TableCell>
|
||||
<TableCell>
|
||||
<TimeWidget time={t.updated} />
|
||||
</TableCell>
|
||||
<TableCell>
|
||||
<TimeWidget time={t.last_used} />
|
||||
</TableCell>
|
||||
<TableCell>{t.ip_restriction}</TableCell>
|
||||
<TableCell>
|
||||
{t.max_inactivity && timeDiff(0, t.max_inactivity)}
|
||||
</TableCell>
|
||||
<TableCell>
|
||||
{t.rights.map((r, n) => {
|
||||
return (
|
||||
<div key={n}>
|
||||
{r.verb} {r.path}
|
||||
</div>
|
||||
);
|
||||
})}
|
||||
</TableCell>
|
||||
<TableContainer component={Paper}>
|
||||
<Table>
|
||||
<TableHead>
|
||||
<TableRow>
|
||||
<TableCell>Name</TableCell>
|
||||
<TableCell>Description</TableCell>
|
||||
<TableCell>Created</TableCell>
|
||||
<TableCell>Updated</TableCell>
|
||||
<TableCell>Last used</TableCell>
|
||||
<TableCell>IP restriction</TableCell>
|
||||
<TableCell>Max inactivity</TableCell>
|
||||
<TableCell>Rights</TableCell>
|
||||
<TableCell>Actions</TableCell>
|
||||
</TableRow>
|
||||
</TableHead>
|
||||
<TableBody>
|
||||
{p.list.map((t) => {
|
||||
return (
|
||||
<TableRow
|
||||
key={t.id}
|
||||
hover
|
||||
onDoubleClick={() => navigate(APITokenURL(t))}
|
||||
style={{ backgroundColor: ExpiredAPIToken(t) ? "red" : "" }}
|
||||
>
|
||||
<TableCell>
|
||||
{t.name} {ExpiredAPIToken(t) && <i>(Expired)</i>}
|
||||
</TableCell>
|
||||
<TableCell>{t.description}</TableCell>
|
||||
<TableCell>
|
||||
<TimeWidget time={t.created} />
|
||||
</TableCell>
|
||||
<TableCell>
|
||||
<TimeWidget time={t.updated} />
|
||||
</TableCell>
|
||||
<TableCell>
|
||||
<TimeWidget time={t.last_used} />
|
||||
</TableCell>
|
||||
<TableCell>{t.ip_restriction}</TableCell>
|
||||
<TableCell>
|
||||
{t.max_inactivity && timeDiff(0, t.max_inactivity)}
|
||||
</TableCell>
|
||||
<TableCell>
|
||||
{t.rights.map((r) => {
|
||||
return (
|
||||
<div>
|
||||
{r.verb} {r.path}
|
||||
</div>
|
||||
);
|
||||
})}
|
||||
</TableCell>
|
||||
|
||||
<TableCell>
|
||||
<RouterLink to={APITokenURL(t)}>
|
||||
<IconButton>
|
||||
<VisibilityIcon />
|
||||
</IconButton>
|
||||
</RouterLink>
|
||||
</TableCell>
|
||||
</TableRow>
|
||||
);
|
||||
})}
|
||||
</TableBody>
|
||||
</Table>
|
||||
</TableContainer>
|
||||
)}
|
||||
|
||||
{p.list.length === 0 && (
|
||||
<Typography style={{ textAlign: "center" }}>
|
||||
No API token created yet.
|
||||
</Typography>
|
||||
)}
|
||||
<TableCell>
|
||||
<RouterLink to={APITokenURL(t)}>
|
||||
<IconButton>
|
||||
<VisibilityIcon />
|
||||
</IconButton>
|
||||
</RouterLink>
|
||||
</TableCell>
|
||||
</TableRow>
|
||||
);
|
||||
})}
|
||||
</TableBody>
|
||||
</Table>
|
||||
</TableContainer>
|
||||
</VirtWebRouteContainer>
|
||||
);
|
||||
}
|
||||
|
@ -1,5 +1,3 @@
import KeyboardArrowDownIcon from "@mui/icons-material/KeyboardArrowDown";
import KeyboardArrowUpIcon from "@mui/icons-material/KeyboardArrowUp";
import VisibilityIcon from "@mui/icons-material/Visibility";
import {
Button,
@ -9,7 +7,6 @@ import {
TableBody,
TableCell,
TableContainer,
TableFooter,
TableHead,
TableRow,
Tooltip,
@ -17,27 +14,19 @@ import {
import { filesize } from "filesize";
import React from "react";
import { useNavigate } from "react-router-dom";
import { GroupApi } from "../api/GroupApi";
import { VMApi, VMInfo, VMState } from "../api/VMApi";
import { VMApi, VMInfo } from "../api/VMApi";
import { AsyncWidget } from "../widgets/AsyncWidget";
import { RouterLink } from "../widgets/RouterLink";
import { VirtWebRouteContainer } from "../widgets/VirtWebRouteContainer";
import { VMStatusWidget } from "../widgets/vms/VMStatusWidget";

export function VMListRoute(): React.ReactElement {
const [groups, setGroups] = React.useState<(string | undefined)[]>();
const [list, setList] = React.useState<VMInfo[] | undefined>();

const loadKey = React.useRef(1);

const load = async () => {
const groups: (string | undefined)[] = await GroupApi.GetList();
const list = await VMApi.GetList();

if (list.find((v) => !v.group) !== undefined) groups.push(undefined);

setGroups(groups);
setList(list);
setList(await VMApi.GetList());
};

const reload = () => {
@ -62,7 +51,7 @@ export function VMListRoute(): React.ReactElement {
</>
}
>
<VMListWidget list={list!} groups={groups!} onReload={reload} />
<VMListWidget list={list!} onReload={reload} />
</VirtWebRouteContainer>
)}
/>
@ -70,37 +59,11 @@ export function VMListRoute(): React.ReactElement {
}

function VMListWidget(p: {
groups: (string | undefined)[];
list: VMInfo[];
onReload: () => void;
}): React.ReactElement {
const navigate = useNavigate();

const [hiddenGroups, setHiddenGroups] = React.useState<
Set<string | undefined>
>(new Set());

const [runningVMs, setRunningVMs] = React.useState<Set<string>>(new Set());

const toggleHiddenGroup = (g: string | undefined) => {
if (hiddenGroups.has(g)) hiddenGroups.delete(g);
else hiddenGroups.add(g);

setHiddenGroups(new Set([...hiddenGroups]));
};

const updateVMState = (v: VMInfo, s: VMState) => {
const running = s !== "Shutoff";
if (runningVMs.has(v.name) === running) {
return;
}

if (running) runningVMs.add(v.name);
else runningVMs.delete(v.name);

setRunningVMs(new Set([...runningVMs]));
};

return (
<TableContainer component={Paper}>
<Table>
@ -109,99 +72,38 @@ function VMListWidget(p: {
<TableCell>Name</TableCell>
<TableCell>Description</TableCell>
<TableCell>Memory</TableCell>
<TableCell>vCPU</TableCell>
<TableCell>Status</TableCell>
<TableCell>Actions</TableCell>
</TableRow>
</TableHead>
<TableBody>
{p.groups.map((g) => (
<React.Fragment key={g}>
{p.groups.length > 1 && (
<TableRow>
<TableCell
style={{ paddingBottom: 2, paddingTop: 2 }}
colSpan={6}
>
<IconButton
size="small"
onClick={() => {
toggleHiddenGroup(g);
}}
>
{!hiddenGroups.has(g) ? (
<KeyboardArrowUpIcon />
) : (
<KeyboardArrowDownIcon />
)}
{p.list.map((row) => (
<TableRow
hover
key={row.name}
sx={{ "&:last-child td, &:last-child th": { border: 0 } }}
onDoubleClick={() => navigate(row.ViewURL)}
>
<TableCell component="th" scope="row">
{row.name}
</TableCell>
<TableCell>{row.description ?? ""}</TableCell>
<TableCell>{filesize(row.memory * 1000 * 1000)}</TableCell>
<TableCell>
<VMStatusWidget vm={row} />
</TableCell>
<TableCell>
<Tooltip title="View this VM">
<RouterLink to={row.ViewURL}>
<IconButton>
<VisibilityIcon />
</IconButton>
{g ?? "default"}
</TableCell>
</TableRow>
)}

{!hiddenGroups.has(g) &&
p.list
.filter((row) => row.group === g)
.map((row) => (
<TableRow
hover
key={row.name}
sx={{ "&:last-child td, &:last-child th": { border: 0 } }}
onDoubleClick={() => navigate(row.ViewURL)}
>
<TableCell component="th" scope="row">
{row.name}
</TableCell>
<TableCell>{row.description ?? ""}</TableCell>
<TableCell>{filesize(row.memory)}</TableCell>
<TableCell>{row.number_vcpu}</TableCell>
<TableCell>
<VMStatusWidget
vm={row}
onChange={(s) => {
updateVMState(row, s);
}}
/>
</TableCell>
<TableCell>
<Tooltip title="View this VM">
<RouterLink to={row.ViewURL}>
<IconButton>
<VisibilityIcon />
</IconButton>
</RouterLink>
</Tooltip>
</TableCell>
</TableRow>
))}
</React.Fragment>
</RouterLink>
</Tooltip>
</TableCell>
</TableRow>
))}
</TableBody>
<TableFooter>
<TableRow>
<TableCell></TableCell>
<TableCell></TableCell>
<TableCell>
{filesize(
p.list
.filter((v) => runningVMs.has(v.name))
.reduce((s, v) => s + v.memory, 0)
)}
{" / "}
{filesize(p.list.reduce((s, v) => s + v.memory, 0))}
</TableCell>
<TableCell>
{p.list
.filter((v) => runningVMs.has(v.name))
.reduce((s, v) => s + v.number_vcpu, 0)}
{" / "}
{p.list.reduce((s, v) => s + v.number_vcpu, 0)}
</TableCell>
<TableCell></TableCell>
<TableCell></TableCell>
</TableRow>
</TableFooter>
</Table>
</TableContainer>
);

@ -59,7 +59,6 @@ function VMRouteBody(p: { vm: VMInfo }): React.ReactElement {
<VMDetails
vm={p.vm}
editable={false}
state={state}
screenshot={p.vm.vnc_access && state === "Running"}
/>
</VirtWebRouteContainer>

@ -1,16 +1,17 @@
import ArrowBackIcon from "@mui/icons-material/ArrowBack";
import FullscreenIcon from "@mui/icons-material/Fullscreen";
import FullscreenExitIcon from "@mui/icons-material/FullscreenExit";
import KeyboardAltIcon from "@mui/icons-material/KeyboardAlt";
import { IconButton, Tooltip } from "@mui/material";
import React, { useEffect } from "react";
import { useNavigate, useParams } from "react-router-dom";
import { VncScreen, VncScreenHandle } from "react-vnc";
import { VncScreen } from "react-vnc";
import { ServerApi } from "../api/ServerApi";
import { VMApi, VMInfo } from "../api/VMApi";
import { useSnackbar } from "../hooks/providers/SnackbarProvider";
import { time } from "../utils/DateUtils";
import { AsyncWidget } from "../widgets/AsyncWidget";
import RFB from "react-vnc/dist/types/noVNC/core/rfb";
import KeyboardAltIcon from "@mui/icons-material/KeyboardAlt";

interface VNCTokenInfo {
url: string;
@ -42,10 +43,9 @@ function VNCInner(p: { vm: VMInfo }): React.ReactElement {

const [token, setToken] = React.useState<VNCTokenInfo | undefined>();
const [counter, setCounter] = React.useState(1);
const [connected, setConnected] = React.useState(false);
const [rfb, setRFB] = React.useState<RFB | undefined>();

const vncRef = React.useRef<HTMLDivElement>(null);
const vncScreenRef = React.useRef<VncScreenHandle>(null);
const vncRef = React.createRef<HTMLDivElement>();

const connect = async (force: boolean) => {
try {
@ -71,7 +71,7 @@ function VNCInner(p: { vm: VMInfo }): React.ReactElement {
};

const disconnected = () => {
setConnected(false);
setRFB(undefined);
connect(true);
};

@ -91,9 +91,7 @@ function VNCInner(p: { vm: VMInfo }): React.ReactElement {
connect(false);

if (vncRef.current) {
vncRef.current.onfullscreenchange = () => {
setCounter(counter + 1);
};
vncRef.current.onfullscreenchange = () => setCounter(counter + 1);
}
});

@ -120,9 +118,9 @@ function VNCInner(p: { vm: VMInfo }): React.ReactElement {
)}

{/* Keystrokes */}
{connected && (
{rfb && (
<Tooltip title="Send Ctrl+Alt+Del">
<IconButton onClick={() => vncScreenRef.current?.sendCtrlAltDel()}>
<IconButton onClick={() => rfb?.sendCtrlAltDel()}>
<KeyboardAltIcon />
</IconButton>
</Tooltip>
@ -139,15 +137,12 @@ function VNCInner(p: { vm: VMInfo }): React.ReactElement {
}}
>
<VncScreen
ref={vncScreenRef}
url={token.url}
onDisconnect={() => {
console.info("VNC disconnected " + token.url);
console.info("VNC disconnected " + token?.url);
disconnected();
}}
onConnect={() => {
setConnected(true);
}}
onConnect={(rfb) => setRFB(rfb)}
/>
</div>
</div>

@ -1,5 +1,4 @@
import VisibilityIcon from "@mui/icons-material/Visibility";
import VisibilityOffIcon from "@mui/icons-material/VisibilityOff";
import { Visibility, VisibilityOff } from "@mui/icons-material";
import {
Alert,
CircularProgress,
@ -36,9 +35,7 @@ export function LoginRoute(): React.ReactElement {
const canSubmit = username.length > 0 && password.length > 0;

const [showPassword, setShowPassword] = React.useState(false);
const handleClickShowPassword = () => {
setShowPassword((show) => !show);
};
const handleClickShowPassword = () => setShowPassword((show) => !show);

const handleMouseDownPassword = (
event: React.MouseEvent<HTMLButtonElement>
@ -107,14 +104,12 @@ export function LoginRoute(): React.ReactElement {
label="Username"
name="username"
value={username}
onChange={(e) => {
setUsername(e.target.value);
}}
onChange={(e) => setUsername(e.target.value)}
autoComplete="username"
autoFocus
/>

<FormControl required fullWidth variant="outlined">
<FormControl fullWidth variant="outlined">
<InputLabel htmlFor="password">Password</InputLabel>
<OutlinedInput
required
@ -124,9 +119,7 @@ export function LoginRoute(): React.ReactElement {
type={showPassword ? "text" : "password"}
id="password"
value={password}
onChange={(e) => {
setPassword(e.target.value);
}}
onChange={(e) => setPassword(e.target.value)}
autoComplete="current-password"
endAdornment={
<InputAdornment position="end">
@ -137,11 +130,7 @@ export function LoginRoute(): React.ReactElement {
onMouseDown={handleMouseDownPassword}
edge="end"
>
{showPassword ? (
<VisibilityOffIcon />
) : (
<VisibilityIcon />
)}
{showPassword ? <VisibilityOff /> : <Visibility />}
</IconButton>
</Tooltip>
</InputAdornment>

5
virtweb_frontend/src/setupTests.ts
Normal file
@ -0,0 +1,5 @@
// jest-dom adds custom jest matchers for asserting on DOM nodes.
// allows you to do things like:
// expect(element).toHaveTextContent(/react/i)
// learn more: https://github.com/testing-library/jest-dom
import '@testing-library/jest-dom';

@ -1,4 +1,4 @@
export function downloadBlob(blob: Blob, filename: string) {
export async function downloadBlob(blob: Blob, filename: string) {
const url = URL.createObjectURL(blob);

const link = document.createElement("a");

@ -2,9 +2,8 @@
* Generate a random MAC address
*/
export function randomMacAddress(prefix: string | undefined): string {
prefix = prefix ?? "";
let mac = "XX:XX:XX:XX:XX:XX";
mac = prefix + mac.slice(prefix.length);
mac = prefix + mac.slice(prefix?.length);

return mac.replace(/X/g, () =>
"0123456789abcdef".charAt(Math.floor(Math.random() * 16))

@ -19,7 +19,7 @@ export function AsyncWidget(p: {
}): React.ReactElement {
const [state, setState] = useState(State.Loading);

const counter = useRef<any>(null);
const counter = useRef<any | null>(null);

const load = async () => {
try {
@ -67,7 +67,7 @@ export function AsyncWidget(p: {

<Button onClick={load}>Try again</Button>

{p.errAdditionalElement?.()}
{p.errAdditionalElement && p.errAdditionalElement()}
</Box>
)
);

@ -2,7 +2,6 @@ import {
mdiApi,
mdiBoxShadow,
mdiDisc,
mdiHarddisk,
mdiHome,
mdiInformation,
mdiLan,
@ -14,9 +13,11 @@ import {
List,
ListItemButton,
ListItemIcon,
ListItemSecondaryAction,
ListItemText,
} from "@mui/material";
import { Outlet, useLocation } from "react-router-dom";
import { isDebug } from "../utils/DebugUtils";
import { RouterLink } from "./RouterLink";
import { VirtWebAppBar } from "./VirtWebAppBar";

@ -67,11 +68,6 @@ export function BaseAuthenticatedPage(): React.ReactElement {
uri="/nwfilter"
icon={<Icon path={mdiSecurityNetwork} size={1} />}
/>
<NavLink
label="Disk images"
uri="/disk_images"
icon={<Icon path={mdiHarddisk} size={1} />}
/>
<NavLink
label="ISO files"
uri="/iso"
@ -88,15 +84,7 @@ export function BaseAuthenticatedPage(): React.ReactElement {
icon={<Icon path={mdiInformation} size={1} />}
/>
</List>
<div
style={{
flexGrow: 1,
flexShrink: 0,
flexBasis: 0,
minWidth: 0,
display: "flex",
}}
>
<div style={{ flex: 1 }}>
<Outlet />
</div>
</Box>
@ -108,6 +96,7 @@ function NavLink(p: {
icon: React.ReactElement;
uri: string;
label: string;
secondaryAction?: React.ReactElement;
}): React.ReactElement {
const location = useLocation();
return (
@ -115,6 +104,9 @@ function NavLink(p: {
<ListItemButton selected={p.uri === location.pathname}>
<ListItemIcon>{p.icon}</ListItemIcon>
<ListItemText primary={p.label} />
{p.secondaryAction && (
<ListItemSecondaryAction>{p.secondaryAction}</ListItemSecondaryAction>
)}
</ListItemButton>
</RouterLink>
);

@ -38,7 +38,10 @@ export function BaseLoginPage() {
<Grid container component="main" sx={{ height: "100vh" }}>
<CssBaseline />
<Grid
size={{ xs: false, sm: 4, md: 7 }}
item
xs={false}
sm={4}
md={7}
sx={{
backgroundImage: "url(/login_splash.jpg)",
backgroundRepeat: "no-repeat",
@ -50,12 +53,7 @@ export function BaseLoginPage() {
backgroundPosition: "center",
}}
/>
<Grid
size={{ xs: 12, sm: 8, md: 5 }}
component={Paper}
elevation={6}
square
>
<Grid item xs={12} sm={8} md={5} component={Paper} elevation={6} square>
<Box
sx={{
my: 8,

Some files were not shown because too many files have changed in this diff