Compare commits
1 commit: master ... debe4f2579
@@ -46,9 +46,8 @@ steps:
- cd virtweb_backend
- mv /tmp/web_build/dist static
- cargo build --release
- cargo build --release --example api_curl
- ls -lah target/release/virtweb_backend target/release/examples/api_curl
- cp target/release/virtweb_backend target/release/examples/api_curl /tmp/release
- ls -lah target/release/virtweb_backend
- cp target/release/virtweb_backend /tmp/release

- name: gitea_release
  image: plugins/gitea-release
virtweb_backend/Cargo.lock (generated): 484 lines changed, diff suppressed because it is too large.
@@ -6,9 +6,9 @@ edition = "2024"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
log = "0.4.28"
log = "0.4.27"
env_logger = "0.11.8"
clap = { version = "4.5.47", features = ["derive", "env"] }
clap = { version = "4.5.40", features = ["derive", "env"] }
light-openid = { version = "1.0.4", features = ["crypto-wrapper"] }
lazy_static = "1.5.0"
actix = "0.13.5"

@@ -17,27 +17,27 @@ actix-remote-ip = "0.1.0"
actix-session = { version = "0.10.1", features = ["cookie-session"] }
actix-identity = "0.8.0"
actix-cors = "0.7.1"
actix-files = "0.6.7"
actix-files = "0.6.6"
actix-ws = "0.3.0"
actix-http = "3.11.1"
actix-http = "3.11.0"
serde = { version = "1.0.219", features = ["derive"] }
serde_json = "1.0.145"
serde_json = "1.0.140"
serde_yml = "0.0.12"
quick-xml = { version = "0.38.3", features = ["serialize", "overlapped-lists"] }
quick-xml = { version = "0.37.5", features = ["serialize", "overlapped-lists"] }
futures-util = "0.3.31"
anyhow = "1.0.99"
anyhow = "1.0.98"
actix-multipart = "0.7.2"
tempfile = "3.20.0"
reqwest = { version = "0.12.23", features = ["stream"] }
url = "2.5.7"
virt = "0.4.3"
sysinfo = { version = "0.36.1", features = ["serde"] }
uuid = { version = "1.17.0", features = ["v4", "serde"] }
reqwest = { version = "0.12.20", features = ["stream"] }
url = "2.5.4"
virt = "0.4.2"
sysinfo = { version = "0.35.1", features = ["serde"] }
uuid = { version = "1.16.0", features = ["v4", "serde"] }
lazy-regex = "3.4.1"
thiserror = "2.0.16"
image = "0.25.8"
rand = "0.9.2"
tokio = { version = "1.47.1", features = ["rt", "time", "macros"] }
thiserror = "2.0.12"
image = "0.25.6"
rand = "0.9.1"
tokio = { version = "1.45.1", features = ["rt", "time", "macros"] }
futures = "0.3.31"
ipnetwork = { version = "0.21.1", features = ["serde"] }
num = "0.4.3"

@@ -45,5 +45,3 @@ rust-embed = { version = "8.7.2", features = ["mime-guess"] }
dotenvy = "0.15.7"
nix = { version = "0.30.1", features = ["net"] }
basic-jwt = "0.3.0"
zip = "4.3.0"
chrono = "0.4.42"
@@ -27,7 +27,10 @@ impl LibVirtActor {
/// Connect to hypervisor
pub async fn connect() -> anyhow::Result<Self> {
let hypervisor_uri = AppConfig::get().hypervisor_uri.as_deref().unwrap_or("");
log::info!("Will connect to hypvervisor at address '{hypervisor_uri}'",);
log::info!(
"Will connect to hypvervisor at address '{}'",
hypervisor_uri
);
let conn = Connect::open(Some(hypervisor_uri))?;

Ok(Self { m: conn })

@@ -99,7 +102,7 @@ impl Handler<GetDomainXMLReq> for LibVirtActor {
log::debug!("Get domain XML:\n{}", msg.0.as_string());
let domain = Domain::lookup_by_uuid_string(&self.m, &msg.0.as_string())?;
let xml = domain.get_xml_desc(VIR_DOMAIN_XML_SECURE)?;
log::debug!("XML = {xml}");
log::debug!("XML = {}", xml);
DomainXML::parse_xml(&xml)
}
}

@@ -128,7 +131,7 @@ impl Handler<DefineDomainReq> for LibVirtActor {
fn handle(&mut self, mut msg: DefineDomainReq, _ctx: &mut Self::Context) -> Self::Result {
let xml = msg.1.as_xml()?;

log::debug!("Define domain:\n{xml}");
log::debug!("Define domain:\n{}", xml);
let domain = Domain::define_xml(&self.m, &xml)?;
let uuid = XMLUuid::parse_from_str(&domain.get_uuid_string()?)?;

@@ -443,7 +446,7 @@ impl Handler<GetNetworkXMLReq> for LibVirtActor {
log::debug!("Get network XML:\n{}", msg.0.as_string());
let network = Network::lookup_by_uuid_string(&self.m, &msg.0.as_string())?;
let xml = network.get_xml_desc(0)?;
log::debug!("XML = {xml}");
log::debug!("XML = {}", xml);
NetworkXML::parse_xml(&xml)
}
}

@@ -599,7 +602,7 @@ impl Handler<GetNWFilterXMLReq> for LibVirtActor {
log::debug!("Get network filter XML:\n{}", msg.0.as_string());
let filter = NWFilter::lookup_by_uuid_string(&self.m, &msg.0.as_string())?;
let xml = filter.get_xml_desc(0)?;
log::debug!("XML = {xml}");
log::debug!("XML = {}", xml);
NetworkFilterXML::parse_xml(xml)
}
}

@@ -614,7 +617,7 @@ impl Handler<DefineNWFilterReq> for LibVirtActor {
fn handle(&mut self, mut msg: DefineNWFilterReq, _ctx: &mut Self::Context) -> Self::Result {
let xml = msg.1.into_xml()?;

log::debug!("Define network filter:\n{xml}");
log::debug!("Define network filter:\n{}", xml);
let filter = NWFilter::define_xml(&self.m, &xml)?;
let uuid = XMLUuid::parse_from_str(&filter.get_uuid_string()?)?;
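Note: the paired log statements above differ only in formatting style. One side captures the variable directly in the format string (supported since Rust 1.58), the other passes it as a positional argument; both print the same text. A minimal standalone sketch (the variable name here is illustrative):

```rust
fn main() {
    let xml = "<domain/>";
    // Positional argument form
    println!("XML = {}", xml);
    // Captured identifier form (Rust 1.58+): same output, shorter call site
    println!("XML = {xml}");
}
```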
@@ -104,10 +104,10 @@ impl Token {
/// Check whether a token is expired or not
pub fn is_expired(&self) -> bool {
if let Some(max_inactivity) = self.max_inactivity
&& max_inactivity + self.last_used < time()
{
return true;
if let Some(max_inactivity) = self.max_inactivity {
if max_inactivity + self.last_used < time() {
return true;
}
}

false

@@ -188,10 +188,10 @@ impl NewToken {
return Some(err);
}

if let Some(t) = self.max_inactivity
&& t < 3600
{
return Some("API tokens shall be valid for at least 1 hour!");
if let Some(t) = self.max_inactivity {
if t < 3600 {
return Some("API tokens shall be valid for at least 1 hour!");
}
}

None
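Note: the token checks above switch between an `if let` chained with `&&` (a let chain) and the classic nested `if let`/`if`. Let chains only compile on newer toolchains where they are stabilized (they are tied to the 2024 edition work); the nested form is the portable equivalent. A hedged sketch with made-up values:

```rust
fn is_expired(max_inactivity: Option<u64>, last_used: u64, now: u64) -> bool {
    // Let-chain form (requires a toolchain with stabilized let chains):
    // if let Some(max) = max_inactivity
    //     && max + last_used < now
    // {
    //     return true;
    // }

    // Nested form, equivalent and accepted by older compilers:
    if let Some(max) = max_inactivity {
        if max + last_used < now {
            return true;
        }
    }
    false
}

fn main() {
    assert!(is_expired(Some(3600), 0, 10_000));
    assert!(!is_expired(None, 0, 10_000));
}
```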
@@ -280,7 +280,7 @@ impl AppConfig {
/// Get VM vnc sockets path for domain
pub fn vnc_socket_for_domain(&self, name: &str) -> PathBuf {
self.vnc_sockets_path().join(format!("vnc-{name}"))
self.vnc_sockets_path().join(format!("vnc-{}", name))
}

/// Get VM root disks storage directory
@@ -31,12 +31,13 @@ pub async fn upload(MultipartForm(mut form): MultipartForm<UploadDiskImageForm>)
}

// Check file mime type
if let Some(mime_type) = file.content_type
&& !constants::ALLOWED_DISK_IMAGES_MIME_TYPES.contains(&mime_type.as_ref())
{
return Ok(HttpResponse::BadRequest().json(format!(
"Unsupported file type for disk upload: {mime_type}"
)));
if let Some(mime_type) = file.content_type {
if !constants::ALLOWED_DISK_IMAGES_MIME_TYPES.contains(&mime_type.as_ref()) {
return Ok(HttpResponse::BadRequest().json(format!(
"Unsupported file type for disk upload: {}",
mime_type
)));
}
}

// Extract and check file name
@@ -31,11 +31,11 @@ pub async fn upload_file(MultipartForm(mut form): MultipartForm<UploadIsoForm>)
return Ok(HttpResponse::BadRequest().json("File is too large!"));
}

if let Some(m) = &file.content_type
&& !constants::ALLOWED_ISO_MIME_TYPES.contains(&m.to_string().as_str())
{
log::error!("Uploaded ISO file has an invalid mimetype!");
return Ok(HttpResponse::BadRequest().json("Invalid mimetype!"));
if let Some(m) = &file.content_type {
if !constants::ALLOWED_ISO_MIME_TYPES.contains(&m.to_string().as_str()) {
log::error!("Uploaded ISO file has an invalid mimetype!");
return Ok(HttpResponse::BadRequest().json("Invalid mimetype!"));
}
}

let file_name = match &file.file_name {

@@ -52,7 +52,7 @@ pub async fn upload_file(MultipartForm(mut form): MultipartForm<UploadIsoForm>)
}

let dest_file = AppConfig::get().iso_storage_path().join(file_name);
log::info!("Will save ISO file {dest_file:?}");
log::info!("Will save ISO file {:?}", dest_file);

if dest_file.exists() {
log::error!("Conflict with uploaded iso file name!");
@@ -87,16 +87,16 @@ pub async fn upload_from_url(req: web::Json<DownloadFromURLReq>) -> HttpResult {
let response = reqwest::get(&req.url).await?;

if let Some(len) = response.content_length()
&& len > constants::ISO_MAX_SIZE.as_bytes() as u64
{
return Ok(HttpResponse::BadRequest().json("File is too large!"));
if let Some(len) = response.content_length() {
if len > constants::ISO_MAX_SIZE.as_bytes() as u64 {
return Ok(HttpResponse::BadRequest().json("File is too large!"));
}
}

if let Some(ct) = response.headers().get("content-type")
&& !constants::ALLOWED_ISO_MIME_TYPES.contains(&ct.to_str()?)
{
return Ok(HttpResponse::BadRequest().json("Invalid file mimetype!"));
if let Some(ct) = response.headers().get("content-type") {
if !constants::ALLOWED_ISO_MIME_TYPES.contains(&ct.to_str()?) {
return Ok(HttpResponse::BadRequest().json("Invalid file mimetype!"));
}
}

let mut stream = response.bytes_stream();
@@ -4,7 +4,6 @@ use actix_web::body::BoxBody;
use actix_web::{HttpResponse, web};
use std::error::Error;
use std::fmt::{Display, Formatter};
use zip::result::ZipError;

pub mod api_tokens_controller;
pub mod auth_controller;

@@ -43,7 +42,7 @@ impl actix_web::error::ResponseError for HttpErr {
}
}
fn error_response(&self) -> HttpResponse<BoxBody> {
log::error!("Error while processing request! {self}");
log::error!("Error while processing request! {}", self);

HttpResponse::InternalServerError().body("Failed to execute request!")
}

@@ -103,12 +102,6 @@ impl From<actix_web::Error> for HttpErr {
}
}

impl From<ZipError> for HttpErr {
fn from(value: ZipError) -> Self {
HttpErr::Err(std::io::Error::other(value.to_string()).into())
}
}

impl From<HttpResponse> for HttpErr {
fn from(value: HttpResponse) -> Self {
HttpErr::HTTPResponse(value)
@@ -1,24 +1,14 @@
use crate::actors::vnc_tokens_actor::VNC_TOKEN_LIFETIME;
use crate::app_config::AppConfig;
use crate::constants;
use crate::constants::{DISK_NAME_MAX_LEN, DISK_NAME_MIN_LEN, DISK_SIZE_MAX, DISK_SIZE_MIN};
use crate::controllers::{HttpResult, LibVirtReq};
use crate::extractors::local_auth_extractor::LocalAuthEnabled;
use crate::libvirt_rest_structures::hypervisor::HypervisorInfo;
use crate::libvirt_rest_structures::net::NetworkInfo;
use crate::libvirt_rest_structures::nw_filter::NetworkFilter;
use crate::libvirt_rest_structures::vm::VMInfo;
use crate::nat::nat_hook;
use crate::utils::net_utils;
use crate::utils::time_utils::{format_date, time};
use crate::{api_tokens, constants};
use actix_files::NamedFile;
use actix_web::{HttpRequest, HttpResponse, Responder};
use serde::Serialize;
use std::fs::File;
use std::io::Write;
use actix_web::{HttpResponse, Responder};
use sysinfo::{Components, Disks, Networks, System};
use zip::ZipWriter;
use zip::write::SimpleFileOptions;

#[derive(serde::Serialize)]
struct StaticConfig {
@@ -209,85 +199,3 @@ pub async fn networks_list() -> HttpResult {
pub async fn bridges_list() -> HttpResult {
Ok(HttpResponse::Ok().json(net_utils::bridges_list()?))
}

/// Add JSON file to ZIP
fn zip_json<E: Serialize, F>(
zip: &mut ZipWriter<File>,
dir: &str,
content: &Vec<E>,
file_name: F,
) -> anyhow::Result<()>
where
F: Fn(&E) -> String,
{
for entry in content {
let file_encoded = serde_json::to_string(&entry)?;

let options = SimpleFileOptions::default()
.compression_method(zip::CompressionMethod::Deflated)
.unix_permissions(0o750);

zip.start_file(format!("{dir}/{}.json", file_name(entry)), options)?;
zip.write_all(file_encoded.as_bytes())?;
}
Ok(())
}

/// Export all configuration elements at once
pub async fn export_all_configs(req: HttpRequest, client: LibVirtReq) -> HttpResult {
// Perform extractions
let vms = client
.get_full_domains_list()
.await?
.into_iter()
.map(VMInfo::from_domain)
.collect::<Result<Vec<_>, _>>()?;
let networks = client
.get_full_networks_list()
.await?
.into_iter()
.map(NetworkInfo::from_xml)
.collect::<Result<Vec<_>, _>>()?;
let nw_filters = client
.get_full_network_filters_list()
.await?
.into_iter()
.map(NetworkFilter::lib2rest)
.collect::<Result<Vec<_>, _>>()?;
let tokens = api_tokens::full_list().await?;

// Create ZIP file
let dest_dir = tempfile::tempdir_in(&AppConfig::get().temp_dir)?;
let zip_path = dest_dir.path().join("export.zip");

let file = File::create(&zip_path)?;
let mut zip = ZipWriter::new(file);

// Encode entities to JSON
zip_json(&mut zip, "vms", &vms, |v| v.name.to_string())?;
zip_json(&mut zip, "networks", &networks, |v| v.name.0.to_string())?;
zip_json(
&mut zip,
"nw_filters",
&nw_filters,
|v| match constants::BUILTIN_NETWORK_FILTER_RULES.contains(&v.name.0.as_str()) {
true => format!("builtin/{}", v.name.0),
false => v.name.0.to_string(),
},
)?;
zip_json(&mut zip, "tokens", &tokens, |v| v.id.0.to_string())?;

// Finalize ZIP and return response
zip.finish()?;
let file = File::open(zip_path)?;

let file = NamedFile::from_file(
file,
format!(
"export_{}.zip",
format_date(time() as i64).unwrap().replace('/', "-")
),
)?;

Ok(file.into_response(&req))
}
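Note: the removed `zip_json` helper serializes each entity to JSON and writes it into the archive through the `zip` crate (`ZipWriter`, `SimpleFileOptions`). A reduced sketch of the same pattern; the `Item` type and the output path are placeholders, not the project's real structures:

```rust
use std::fs::File;
use std::io::Write;
use zip::ZipWriter;
use zip::write::SimpleFileOptions;

#[derive(serde::Serialize)]
struct Item {
    name: String,
}

fn main() -> anyhow::Result<()> {
    let file = File::create("export.zip")?;
    let mut zip = ZipWriter::new(file);

    let items = vec![Item { name: "vm1".into() }, Item { name: "vm2".into() }];
    for item in &items {
        let encoded = serde_json::to_string(item)?;
        // Deflate compression and 0o750 permissions, as in the removed helper
        let options = SimpleFileOptions::default()
            .compression_method(zip::CompressionMethod::Deflated)
            .unix_permissions(0o750);
        zip.start_file(format!("vms/{}.json", item.name), options)?;
        zip.write_all(encoded.as_bytes())?;
    }

    zip.finish()?;
    Ok(())
}
```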
@@ -128,21 +128,21 @@ impl FromRequest for ApiAuthExtractor {
));
}

if let Some(ip) = token.ip_restriction
&& !ip.contains(remote_ip.0)
{
log::error!(
"Attempt to use a token for an unauthorized IP! {remote_ip:?} token_id={}",
token.id.0
);
return Err(ErrorUnauthorized("Token cannot be used from this IP!"));
if let Some(ip) = token.ip_restriction {
if !ip.contains(remote_ip.0) {
log::error!(
"Attempt to use a token for an unauthorized IP! {remote_ip:?} token_id={}",
token.id.0
);
return Err(ErrorUnauthorized("Token cannot be used from this IP!"));
}
}

if token.should_update_last_activity()
&& let Err(e) = api_tokens::refresh_last_used(token.id).await
{
log::error!("Could not update token last activity! {e}");
return Err(ErrorBadRequest("Couldn't refresh token last activity!"));
if token.should_update_last_activity() {
if let Err(e) = api_tokens::refresh_last_used(token.id).await {
log::error!("Could not update token last activity! {e}");
return Err(ErrorBadRequest("Couldn't refresh token last activity!"));
}
}

Ok(ApiAuthExtractor { token, claims })
@@ -96,28 +96,28 @@ impl NetworkInfo {
return Err(StructureExtraction("network name is invalid!").into());
}

if let Some(n) = &self.title
&& n.contains('\n')
{
return Err(StructureExtraction("Network title contain newline char!").into());
if let Some(n) = &self.title {
if n.contains('\n') {
return Err(StructureExtraction("Network title contain newline char!").into());
}
}

if let Some(dev) = &self.device
&& !regex!("^[a-zA-Z0-9]+$").is_match(dev)
{
return Err(StructureExtraction("Network device name is invalid!").into());
if let Some(dev) = &self.device {
if !regex!("^[a-zA-Z0-9]+$").is_match(dev) {
return Err(StructureExtraction("Network device name is invalid!").into());
}
}

if let Some(bridge) = &self.bridge_name
&& !regex!("^[a-zA-Z0-9]+$").is_match(bridge)
{
return Err(StructureExtraction("Network bridge name is invalid!").into());
if let Some(bridge) = &self.bridge_name {
if !regex!("^[a-zA-Z0-9]+$").is_match(bridge) {
return Err(StructureExtraction("Network bridge name is invalid!").into());
}
}

if let Some(domain) = &self.domain
&& !regex!("^[a-zA-Z0-9.]+$").is_match(domain)
{
return Err(StructureExtraction("Domain name is invalid!").into());
if let Some(domain) = &self.domain {
if !regex!("^[a-zA-Z0-9.]+$").is_match(domain) {
return Err(StructureExtraction("Domain name is invalid!").into());
}
}

let mut ips = Vec::with_capacity(2);
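Note: the validation above relies on the `regex!` macro, which in the `lazy-regex` crate (listed in Cargo.toml) compiles the pattern once on first use and caches it. A small usage sketch with illustrative inputs:

```rust
use lazy_regex::regex;

fn is_valid_device_name(dev: &str) -> bool {
    // Compiled once on first use, then reused on every call
    regex!("^[a-zA-Z0-9]+$").is_match(dev)
}

fn main() {
    assert!(is_valid_device_name("eth0"));
    assert!(!is_valid_device_name("eth-0"));
}
```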
@@ -303,16 +303,16 @@ impl NetworkInfo {
/// Check if at least one NAT definition was specified on this interface
pub fn has_nat_def(&self) -> bool {
if let Some(ipv4) = &self.ip_v4
&& ipv4.nat.is_some()
{
return true;
if let Some(ipv4) = &self.ip_v4 {
if ipv4.nat.is_some() {
return true;
}
}

if let Some(ipv6) = &self.ip_v6
&& ipv6.nat.is_some()
{
return true;
if let Some(ipv6) = &self.ip_v6 {
if ipv6.nat.is_some() {
return true;
}
}

false
@@ -43,12 +43,14 @@ impl From<&String> for NetworkFilterMacAddressOrVar {
fn extract_mac_address_or_var(
n: &Option<NetworkFilterMacAddressOrVar>,
) -> anyhow::Result<Option<String>> {
if let Some(mac) = n
&& !mac.is_valid()
{
return Err(
NetworkFilterExtraction(format!("Invalid mac address or variable! {}", mac.0)).into(),
);
if let Some(mac) = n {
if !mac.is_valid() {
return Err(NetworkFilterExtraction(format!(
"Invalid mac address or variable! {}",
mac.0
))
.into());
}
}

Ok(n.as_ref().map(|n| n.0.to_string()))

@@ -81,34 +83,34 @@ impl<const V: usize> From<&String> for NetworkFilterIPOrVar<V> {
fn extract_ip_or_var<const V: usize>(
n: &Option<NetworkFilterIPOrVar<V>>,
) -> anyhow::Result<Option<String>> {
if let Some(ip) = n
&& !ip.is_valid()
{
return Err(NetworkFilterExtraction(format!(
"Invalid IPv{V} address or variable! {}",
ip.0
))
.into());
if let Some(ip) = n {
if !ip.is_valid() {
return Err(NetworkFilterExtraction(format!(
"Invalid IPv{V} address or variable! {}",
ip.0
))
.into());
}
}

Ok(n.as_ref().map(|n| n.0.to_string()))
}

fn extract_ip_mask<const V: usize>(n: Option<u8>) -> anyhow::Result<Option<u8>> {
if let Some(mask) = n
&& !net_utils::is_mask_valid(V, mask)
{
return Err(NetworkFilterExtraction(format!("Invalid IPv{V} mask! {mask}")).into());
if let Some(mask) = n {
if !net_utils::is_mask_valid(V, mask) {
return Err(NetworkFilterExtraction(format!("Invalid IPv{V} mask! {mask}")).into());
}
}

Ok(n)
}

fn extract_nw_filter_comment(n: &Option<String>) -> anyhow::Result<Option<String>> {
if let Some(comment) = n
&& (comment.len() > 256 || comment.contains('\"') || comment.contains('\n'))
{
return Err(NetworkFilterExtraction(format!("Invalid comment! {comment}")).into());
if let Some(comment) = n {
if comment.len() > 256 || comment.contains('\"') || comment.contains('\n') {
return Err(NetworkFilterExtraction(format!("Invalid comment! {}", comment)).into());
}
}

Ok(n.clone())

@@ -867,10 +869,12 @@ impl NetworkFilter {
);
}

if let Some(priority) = self.priority
&& !(-1000..=1000).contains(&priority)
{
return Err(NetworkFilterExtraction("Network priority is invalid!".to_string()).into());
if let Some(priority) = self.priority {
if !(-1000..=1000).contains(&priority) {
return Err(
NetworkFilterExtraction("Network priority is invalid!".to_string()).into(),
);
}
}

for fref in &self.join_filters {
@@ -118,22 +118,22 @@ impl VMInfo {
XMLUuid::new_random()
};

if let Some(n) = &self.genid
&& !n.is_valid()
{
return Err(StructureExtraction("VM genid is invalid!").into());
if let Some(n) = &self.genid {
if !n.is_valid() {
return Err(StructureExtraction("VM genid is invalid!").into());
}
}

if let Some(n) = &self.title
&& n.contains('\n')
{
return Err(StructureExtraction("VM title contain newline char!").into());
if let Some(n) = &self.title {
if n.contains('\n') {
return Err(StructureExtraction("VM title contain newline char!").into());
}
}

if let Some(group) = &self.group
&& !regex!("^[a-zA-Z0-9]+$").is_match(&group.0)
{
return Err(StructureExtraction("VM group name is invalid!").into());
if let Some(group) = &self.group {
if !regex!("^[a-zA-Z0-9]+$").is_match(&group.0) {
return Err(StructureExtraction("VM group name is invalid!").into());
}
}

if self.memory < constants::MIN_VM_MEMORY || self.memory > constants::MAX_VM_MEMORY {
@@ -157,10 +157,6 @@ async fn main() -> std::io::Result<()> {
"/api/server/bridges",
web::get().to(server_controller::bridges_list),
)
.route(
"/api/server/export_configs",
web::get().to(server_controller::export_all_configs),
)
// Auth controller
.route(
"/api/auth/local",
@@ -69,7 +69,8 @@ where
if !AppConfig::get().is_allowed_ip(remote_ip.0) {
log::error!(
"An attempt to access VirtWeb from an unauthorized network has been intercepted! {remote_ip:?}"
"An attempt to access VirtWeb from an unauthorized network has been intercepted! {:?}",
remote_ip
);
return Ok(req
.into_response(
@@ -60,10 +60,10 @@ pub struct Nat<IPv> {
impl<IPv> Nat<IPv> {
pub fn check(&self) -> anyhow::Result<()> {
if let NatSourceIP::Interface { name } = &self.host_ip
&& !net_utils::is_net_interface_name_valid(name)
{
return Err(NatDefError::InvalidNatDef("Invalid nat interface name!").into());
if let NatSourceIP::Interface { name } = &self.host_ip {
if !net_utils::is_net_interface_name_valid(name) {
return Err(NatDefError::InvalidNatDef("Invalid nat interface name!").into());
}
}

if let NatHostPort::Range { start, end } = &self.host_port {

@@ -84,10 +84,10 @@ impl<IPv> Nat<IPv> {
return Err(NatDefError::InvalidNatDef("Invalid guest port!").into());
}

if let Some(comment) = &self.comment
&& comment.len() > constants::NET_NAT_COMMENT_MAX_SIZE
{
return Err(NatDefError::InvalidNatDef("Comment is too large!").into());
if let Some(comment) = &self.comment {
if comment.len() > constants::NET_NAT_COMMENT_MAX_SIZE {
return Err(NatDefError::InvalidNatDef("Comment is too large!").into());
}
}

Ok(())
@@ -81,10 +81,10 @@ impl CloudInitConfig {
// Process metadata
let mut metadatas = vec![];
if let Some(inst_id) = &self.instance_id {
metadatas.push(format!("instance-id: {inst_id}"));
metadatas.push(format!("instance-id: {}", inst_id));
}
if let Some(local_hostname) = &self.local_hostname {
metadatas.push(format!("local-hostname: {local_hostname}"));
metadatas.push(format!("local-hostname: {}", local_hostname));
}
if let Some(dsmode) = &self.dsmode {
metadatas.push(format!(
@@ -1,4 +1,3 @@
use chrono::Datelike;
use std::time::{SystemTime, UNIX_EPOCH};

/// Get the current time since epoch

@@ -14,15 +13,3 @@ pub fn time() -> u64 {
.unwrap()
.as_secs()
}

/// Format given UNIX time in a simple format
pub fn format_date(time: i64) -> anyhow::Result<String> {
let date = chrono::DateTime::from_timestamp(time, 0).ok_or(anyhow::anyhow!("invalid date"))?;

Ok(format!(
"{:0>2}/{:0>2}/{}",
date.day(),
date.month(),
date.year()
))
}
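Note: `format_date` goes through `chrono::DateTime::from_timestamp`, which returns `None` for out-of-range values, and then builds a zero-padded `DD/MM/YYYY` string. A quick check of the function shown above (reproduced here so the snippet is self-contained):

```rust
use chrono::Datelike;

fn format_date(time: i64) -> anyhow::Result<String> {
    let date = chrono::DateTime::from_timestamp(time, 0).ok_or(anyhow::anyhow!("invalid date"))?;
    Ok(format!("{:0>2}/{:0>2}/{}", date.day(), date.month(), date.year()))
}

fn main() -> anyhow::Result<()> {
    // The UNIX epoch formats as 01/01/1970
    assert_eq!(format_date(0)?, "01/01/1970");
    Ok(())
}
```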
virtweb_frontend/package-lock.json (generated): 1319 lines changed, diff suppressed because it is too large.
@@ -11,46 +11,46 @@
},
"dependencies": {
"@emotion/react": "^11.14.0",
"@emotion/styled": "^11.14.1",
"@fontsource/roboto": "^5.2.8",
"@emotion/styled": "^11.14.0",
"@fontsource/roboto": "^5.2.6",
"@mdi/js": "^7.4.47",
"@mdi/react": "^1.6.1",
"@monaco-editor/react": "^4.7.0",
"@mui/icons-material": "^7.3.1",
"@mui/material": "^7.3.1",
"@mui/icons-material": "^7.1.1",
"@mui/material": "^7.1.1",
"@mui/x-charts": "^8.3.1",
"@mui/x-data-grid": "^8.11.3",
"@mui/x-data-grid": "^8.3.1",
"date-and-time": "^3.6.0",
"filesize": "^10.1.6",
"humanize-duration": "^3.33.0",
"humanize-duration": "^3.32.2",
"monaco-editor": "^0.52.2",
"monaco-yaml": "^5.4.0",
"react": "^19.1.1",
"react-dom": "^19.1.1",
"react-router-dom": "^7.8.0",
"react-syntax-highlighter": "^15.6.6",
"react": "^19.1.0",
"react-dom": "^19.1.0",
"react-router-dom": "^7.6.2",
"react-syntax-highlighter": "^15.6.1",
"react-vnc": "^3.1.0",
"uuid": "^11.1.0",
"xml-formatter": "^3.6.6",
"yaml": "^2.8.1"
"yaml": "^2.8.0"
},
"devDependencies": {
"@eslint/js": "^9.35.0",
"@eslint/js": "^9.27.0",
"@types/humanize-duration": "^3.27.4",
"@types/jest": "^30.0.0",
"@types/react": "^19.1.13",
"@types/react-dom": "^19.1.9",
"@types/jest": "^29.5.14",
"@types/react": "^19.1.8",
"@types/react-dom": "^19.1.6",
"@types/react-syntax-highlighter": "^15.5.13",
"@types/uuid": "^10.0.0",
"@vitejs/plugin-react": "^4.7.0",
"eslint": "^9.35.0",
"eslint-plugin-react-dom": "^1.53.1",
"@vitejs/plugin-react": "^4.4.1",
"eslint": "^9.27.0",
"eslint-plugin-react-dom": "^1.49.0",
"eslint-plugin-react-hooks": "^5.2.0",
"eslint-plugin-react-refresh": "^0.4.20",
"eslint-plugin-react-x": "^1.52.9",
"globals": "^16.3.0",
"typescript": "^5.9.2",
"typescript-eslint": "^8.43.0",
"vite": "^6.3.6"
"eslint-plugin-react-x": "^1.49.0",
"globals": "^16.1.0",
"typescript": "^5.8.3",
"typescript-eslint": "^8.32.1",
"vite": "^6.3.5"
}
}
@@ -232,16 +232,4 @@ export class ServerApi {
})
).data;
}

/**
* Export all server configs
*/
static async ExportServerConfigs(): Promise<Blob> {
return (
await APIClient.exec({
method: "GET",
uri: "/server/export_configs",
})
).data;
}
}
@@ -9,21 +9,18 @@ import {
import Icon from "@mdi/react";
import {
Box,
IconButton,
LinearProgress,
Table,
TableBody,
TableCell,
TableHead,
TableRow,
Tooltip,
Typography,
} from "@mui/material";
import Grid from "@mui/material/Grid";
import { PieChart } from "@mui/x-charts";
import { filesize } from "filesize";
import humanizeDuration from "humanize-duration";
import IosShareIcon from "@mui/icons-material/IosShare";
import React from "react";
import {
DiskInfo,

@@ -34,8 +31,6 @@ import {
import { AsyncWidget } from "../widgets/AsyncWidget";
import { VirtWebPaper } from "../widgets/VirtWebPaper";
import { VirtWebRouteContainer } from "../widgets/VirtWebRouteContainer";
import { useLoadingMessage } from "../hooks/providers/LoadingMessageProvider";
import { useAlert } from "../hooks/providers/AlertDialogProvider";

export function SysInfoRoute(): React.ReactElement {
const [info, setInfo] = React.useState<ServerSystemInfo>();

@@ -57,23 +52,6 @@ export function SysInfoRoute(): React.ReactElement {
export function SysInfoRouteInner(p: {
info: ServerSystemInfo;
}): React.ReactElement {
const alert = useAlert();
const loadingMessage = useLoadingMessage();
const downloadAllConfig = async () => {
try {
loadingMessage.show("Downloading server config...");
const res = await ServerApi.ExportServerConfigs();

const url = URL.createObjectURL(res);
window.location.href = url;
} catch (e) {
console.error("Failed to download server config!", e);
alert(`Failed to download server config! ${e}`);
} finally {
loadingMessage.hide();
}
};

const sumDiskUsage = p.info.disks.reduce(
(prev, disk) => {
return {

@@ -85,16 +63,7 @@ export function SysInfoRouteInner(p: {
);

return (
<VirtWebRouteContainer
label="Sysinfo"
actions={
<Tooltip title="Export all server configs">
<IconButton onClick={downloadAllConfig}>
<IosShareIcon />
</IconButton>
</Tooltip>
}
>
<VirtWebRouteContainer label="Sysinfo">
<Grid container spacing={2}>
{/* Memory */}
<Grid size={{ xs: 4 }}>

@@ -319,7 +288,7 @@ function DiskDetailsTable(p: { disks: DiskInfo[] }): React.ReactElement {
{p.disks.map((e, c) => (
<TableRow hover key={c}>
<TableCell>{e.name}</TableCell>
<TableCell>{String(e.DiskKind)}</TableCell>
<TableCell>{e.DiskKind}</TableCell>
<TableCell>{e.mount_point}</TableCell>
<TableCell>{filesize(e.total_space)}</TableCell>
<TableCell>{filesize(e.available_space)}</TableCell>
@@ -333,7 +333,8 @@ function CloudInitBooleanInput(p: {
label={p.name}
checked={p.yaml.getIn(p.attrPath) === true}
onValueChange={(v) => {
p.yaml.setIn(p.attrPath, v);
if (v) p.yaml.setIn(p.attrPath, v);
else p.yaml.deleteIn(p.attrPath);
p.onChange?.();
}}
/>
@@ -799,11 +799,6 @@ export function TokenRightsEditor(p: {
right={{ verb: "GET", path: "/api/server/bridges" }}
label="Get list of network bridges"
/>
<RouteRight
{...p}
right={{ verb: "GET", path: "/api/server/export_configs" }}
label="Export all configurations"
/>
</RightsSection>
</>
);