Add Tauri UI

This commit is contained in:
Zoltán Papp
2026-03-05 08:53:24 +01:00
parent 1451cedf86
commit f8cf994900
58 changed files with 16900 additions and 0 deletions

5746
client/uitauri/src-tauri/Cargo.lock generated Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,29 @@
[package]
name = "netbird-ui"
version = "0.1.0"
edition = "2021"

[build-dependencies]
tauri-build = { version = "2", features = [] }
# Generates the daemon gRPC client from ../../proto/daemon.proto (see build.rs).
tonic-build = "0.12"

[dependencies]
tauri = { version = "2", features = ["tray-icon", "image-png"] }
tauri-plugin-single-instance = "2"
notify-rust = "4"
tonic = "0.12"
prost = "0.13"
prost-types = "0.13"
tokio = { version = "1", features = ["full"] }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
log = "0.4"
env_logger = "0.11"
# tower / hyper-util power the Unix-socket gRPC connector; they were previously
# also listed under [target.'cfg(unix)'.dependencies], which is redundant since
# plain [dependencies] already applies to every target.
tower = "0.5"
hyper-util = "0.1"
http = "1"

[target.'cfg(unix)'.dependencies]
tokio-stream = "0.1"

View File

@@ -0,0 +1,7 @@
// Build script: generates the daemon gRPC client and runs Tauri's build steps.
fn main() {
    // Compile the daemon.proto for tonic gRPC client
    tonic_build::compile_protos("../../proto/daemon.proto")
        .expect("Failed to compile daemon.proto");
    // Standard Tauri build-time processing (must run for every Tauri app).
    tauri_build::build();
}

View File

@@ -0,0 +1,10 @@
{
"$schema": "../gen/schemas/desktop-schema.json",
"identifier": "default",
"description": "Default capabilities for the NetBird UI",
"windows": ["main"],
"permissions": [
"core:default",
"core:tray:default"
]
}

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1 @@
{"default":{"identifier":"default","description":"Default capabilities for the NetBird UI","local":true,"windows":["main"],"permissions":["core:default","core:tray:default"]}}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

Binary file not shown.

After

Width:  |  Height:  |  Size: 5.2 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 5.3 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 4.7 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 5.1 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 4.7 KiB

View File

@@ -0,0 +1,72 @@
use serde::Serialize;
use tauri::State;
use crate::proto;
use crate::state::AppState;
/// Connection summary returned to the frontend; serialized as camelCase JSON.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct StatusInfo {
    // Raw daemon status string (e.g. "Connected", "Connecting").
    pub status: String,
    // Local peer's NetBird IP; empty until a full status is available.
    pub ip: String,
    // Local peer's WireGuard public key.
    pub public_key: String,
    // Local peer's NetBird FQDN.
    pub fqdn: String,
    // Number of peers in the daemon's full status report.
    pub connected_peers: usize,
}
/// Tauri command: queries the daemon for a full status report and condenses
/// it into a `StatusInfo` for the frontend.
#[tauri::command]
pub async fn get_status(state: State<'_, AppState>) -> Result<StatusInfo, String> {
    let mut client = state.grpc.get_client().await?;
    let resp = client
        .status(proto::StatusRequest {
            get_full_peer_status: true,
            should_run_probes: false,
            wait_for_ready: None,
        })
        .await
        .map_err(|e| format!("status rpc: {}", e))?
        .into_inner();
    let mut info = StatusInfo {
        status: resp.status,
        ip: String::new(),
        public_key: String::new(),
        fqdn: String::new(),
        connected_peers: 0,
    };
    // Local-peer details only exist when the daemon returned a full status.
    if let Some(full) = resp.full_status {
        info.connected_peers = full.peers.len();
        if let Some(lp) = full.local_peer_state {
            info.ip = lp.ip;
            info.public_key = lp.pub_key;
            info.fqdn = lp.fqdn;
        }
    }
    Ok(info)
}
/// Tauri command: asks the daemon to bring the tunnel up.
#[tauri::command]
pub async fn connect(state: State<'_, AppState>) -> Result<(), String> {
    // All-None request means "use the currently active profile".
    let request = proto::UpRequest {
        profile_name: None,
        username: None,
        auto_update: None,
    };
    let mut client = state.grpc.get_client().await?;
    match client.up(request).await {
        Ok(_) => Ok(()),
        Err(e) => Err(format!("connect: {}", e)),
    }
}
/// Tauri command: asks the daemon to tear the tunnel down.
#[tauri::command]
pub async fn disconnect(state: State<'_, AppState>) -> Result<(), String> {
    let mut client = state.grpc.get_client().await?;
    client
        .down(proto::DownRequest {})
        .await
        .map(|_| ())
        .map_err(|e| format!("disconnect: {}", e))
}

View File

@@ -0,0 +1,188 @@
use serde::{Deserialize, Serialize};
use tauri::State;
use crate::proto;
use crate::state::AppState;
/// Frontend-supplied options for building a debug bundle (camelCase over IPC).
#[derive(Debug, Clone, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct DebugBundleParams {
    // Scrub identifying data from the bundle.
    pub anonymize: bool,
    // Include system information in the bundle.
    pub system_info: bool,
    // Whether to upload the bundle after creation.
    pub upload: bool,
    // Upload destination; only forwarded when `upload` is true and it is non-empty.
    pub upload_url: String,
    // When > 0, run the extended debug cycle for this many minutes first.
    pub run_duration_mins: u32,
    // Enable sync-response persistence during the debug run.
    pub enable_persistence: bool,
}
/// Outcome of a debug-bundle request, relayed from the daemon to the frontend.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct DebugBundleResult {
    // Filesystem path of the generated bundle, as reported by the daemon.
    pub local_path: String,
    // Key under which the bundle was uploaded (empty when not uploaded).
    pub uploaded_key: String,
    // Reason the upload failed, if it did.
    pub upload_failure_reason: String,
}
/// Tauri command: has the daemon assemble a debug bundle.
///
/// When `params.run_duration_mins > 0` the full debug cycle runs first
/// (trace logging, tunnel restart, CPU profile) via `configure_for_debug`.
/// Returns the bundle path plus upload outcome from the daemon.
#[tauri::command]
pub async fn create_debug_bundle(
    state: State<'_, AppState>,
    params: DebugBundleParams,
) -> Result<DebugBundleResult, String> {
    let mut client = state.grpc.get_client().await?;
    if params.run_duration_mins > 0 {
        configure_for_debug(&mut client, &params).await?;
    }
    // Only forward an upload URL when uploading is enabled and configured.
    let upload_url = if params.upload && !params.upload_url.is_empty() {
        params.upload_url.clone()
    } else {
        String::new()
    };
    let resp = client
        .debug_bundle(proto::DebugBundleRequest {
            anonymize: params.anonymize,
            system_info: params.system_info,
            // Field-init shorthand (was the redundant `upload_url: upload_url`).
            upload_url,
            log_file_count: 0,
        })
        .await
        .map_err(|e| format!("create debug bundle: {}", e))?
        .into_inner();
    Ok(DebugBundleResult {
        local_path: resp.path,
        uploaded_key: resp.uploaded_key,
        upload_failure_reason: resp.upload_failure_reason,
    })
}
/// Puts the daemon into a verbose state for an extended debug run: raises the
/// log level to trace, restarts the tunnel, captures a CPU profile across the
/// configured duration, then restores the previous connection state and log
/// level. Several steps are deliberately best-effort (`let _ =`).
async fn configure_for_debug(
    client: &mut proto::daemon_service_client::DaemonServiceClient<tonic::transport::Channel>,
    params: &DebugBundleParams,
) -> Result<(), String> {
    // Get current status
    let status_resp = client
        .status(proto::StatusRequest {
            get_full_peer_status: false,
            should_run_probes: false,
            wait_for_ready: None,
        })
        .await
        .map_err(|e| format!("get status: {}", e))?
        .into_inner();
    // Remember whether the tunnel was up so we can restore that state later.
    let was_connected =
        status_resp.status == "Connected" || status_resp.status == "Connecting";
    // Get current log level
    let log_resp = client
        .get_log_level(proto::GetLogLevelRequest {})
        .await
        .map_err(|e| format!("get log level: {}", e))?
        .into_inner();
    let original_level = log_resp.level;
    // Set trace log level
    client
        .set_log_level(proto::SetLogLevelRequest {
            level: proto::LogLevel::Trace.into(),
        })
        .await
        .map_err(|e| format!("set log level: {}", e))?;
    // Bring down then up
    let _ = client.down(proto::DownRequest {}).await;
    tokio::time::sleep(std::time::Duration::from_secs(1)).await;
    if params.enable_persistence {
        // Best-effort: a persistence failure should not abort the debug run.
        let _ = client
            .set_sync_response_persistence(proto::SetSyncResponsePersistenceRequest {
                enabled: true,
            })
            .await;
    }
    client
        .up(proto::UpRequest {
            profile_name: None,
            username: None,
            auto_update: None,
        })
        .await
        .map_err(|e| format!("bring service up: {}", e))?;
    tokio::time::sleep(std::time::Duration::from_secs(3)).await;
    // Best-effort CPU profiling bracketing the collection window.
    let _ = client
        .start_cpu_profile(proto::StartCpuProfileRequest {})
        .await;
    // Wait for collection duration
    let duration = std::time::Duration::from_secs(params.run_duration_mins as u64 * 60);
    tokio::time::sleep(duration).await;
    let _ = client
        .stop_cpu_profile(proto::StopCpuProfileRequest {})
        .await;
    // Restore original state
    if !was_connected {
        let _ = client.down(proto::DownRequest {}).await;
    }
    // NOTE(review): assumes the proto enum orders Trace as the highest numeric
    // value, so "< Trace" means "was less verbose" — verify against daemon.proto.
    if original_level < proto::LogLevel::Trace as i32 {
        let _ = client
            .set_log_level(proto::SetLogLevelRequest {
                level: original_level,
            })
            .await;
    }
    Ok(())
}
/// Tauri command: reads the daemon's current log level and maps it to the
/// uppercase display name the frontend expects.
#[tauri::command]
pub async fn get_log_level(state: State<'_, AppState>) -> Result<String, String> {
    let mut client = state.grpc.get_client().await?;
    let resp = client
        .get_log_level(proto::GetLogLevelRequest {})
        .await
        .map_err(|e| format!("get log level rpc: {}", e))?
        .into_inner();
    // Unrecognized numeric values (and any variant not listed) map to UNKNOWN.
    let name = match proto::LogLevel::try_from(resp.level) {
        Ok(proto::LogLevel::Trace) => "TRACE",
        Ok(proto::LogLevel::Debug) => "DEBUG",
        Ok(proto::LogLevel::Info) => "INFO",
        Ok(proto::LogLevel::Warn) => "WARN",
        Ok(proto::LogLevel::Error) => "ERROR",
        Ok(proto::LogLevel::Fatal) => "FATAL",
        Ok(proto::LogLevel::Panic) => "PANIC",
        _ => "UNKNOWN",
    }
    .to_string();
    Ok(name)
}
/// Tauri command: sets the daemon log level from its uppercase display name.
/// Unrecognized names (including "INFO" itself) fall back to Info.
#[tauri::command]
pub async fn set_log_level(state: State<'_, AppState>, level: String) -> Result<(), String> {
    let mut client = state.grpc.get_client().await?;
    let proto_level = match level.as_str() {
        "TRACE" => proto::LogLevel::Trace,
        "DEBUG" => proto::LogLevel::Debug,
        "WARN" | "WARNING" => proto::LogLevel::Warn,
        "ERROR" => proto::LogLevel::Error,
        _ => proto::LogLevel::Info,
    };
    let request = proto::SetLogLevelRequest {
        level: proto_level.into(),
    };
    client
        .set_log_level(request)
        .await
        .map(|_| ())
        .map_err(|e| format!("set log level rpc: {}", e))
}

View File

@@ -0,0 +1,7 @@
//! Tauri command handlers, grouped by feature area.
pub mod connection;
pub mod debug;
pub mod network;
pub mod peers;
pub mod profile;
pub mod settings;
pub mod update;

View File

@@ -0,0 +1,164 @@
use std::collections::HashMap;
use serde::Serialize;
use tauri::State;
use crate::proto;
use crate::state::AppState;
/// A routable network (or exit node) advertised by the daemon; camelCase JSON.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct NetworkInfo {
    pub id: String,
    // CIDR range; "0.0.0.0/0" marks an exit node (see list_exit_nodes).
    pub range: String,
    // DNS-route domains; empty for plain IP-range networks.
    pub domains: Vec<String>,
    // Whether this network is currently selected.
    pub selected: bool,
    // Explicit rename: camelCase alone would produce "resolvedIps".
    #[serde(rename = "resolvedIPs")]
    pub resolved_ips: HashMap<String, Vec<String>>,
}
/// Converts a daemon `proto::Network` into the frontend-facing `NetworkInfo`.
fn network_from_proto(r: &proto::Network) -> NetworkInfo {
    // Flatten the proto's resolved-IP map into plain domain -> IP-list entries.
    let resolved_ips: HashMap<String, Vec<String>> = r
        .resolved_i_ps
        .iter()
        .map(|(domain, ip_list)| (domain.clone(), ip_list.ips.clone()))
        .collect();
    NetworkInfo {
        id: r.id.clone(),
        range: r.range.clone(),
        domains: r.domains.clone(),
        selected: r.selected,
        resolved_ips,
    }
}
/// Pulls the full network list from the daemon, sorted case-insensitively by id.
async fn fetch_networks(state: &State<'_, AppState>) -> Result<Vec<NetworkInfo>, String> {
    let mut client = state.grpc.get_client().await?;
    let resp = client
        .list_networks(proto::ListNetworksRequest {})
        .await
        .map_err(|e| format!("list networks rpc: {}", e))?
        .into_inner();
    let mut networks: Vec<NetworkInfo> =
        resp.routes.iter().map(network_from_proto).collect();
    networks.sort_by(|lhs, rhs| lhs.id.to_lowercase().cmp(&rhs.id.to_lowercase()));
    Ok(networks)
}
/// Tauri command: returns every network the daemon knows about, sorted by id.
#[tauri::command]
pub async fn list_networks(state: State<'_, AppState>) -> Result<Vec<NetworkInfo>, String> {
    fetch_networks(&state).await
}
/// Tauri command: returns IP-range networks whose CIDR range is shared by at
/// least one other network. Domain-based networks are excluded.
#[tauri::command]
pub async fn list_overlapping_networks(
    state: State<'_, AppState>,
) -> Result<Vec<NetworkInfo>, String> {
    // Bucket plain IP-range networks by their CIDR range.
    let mut by_range: HashMap<String, Vec<NetworkInfo>> = HashMap::new();
    for net in fetch_networks(&state).await? {
        if net.domains.is_empty() {
            by_range.entry(net.range.clone()).or_default().push(net);
        }
    }
    // Report every member of any bucket holding more than one network.
    let mut overlapping = Vec::new();
    for group in by_range.values() {
        if group.len() > 1 {
            overlapping.extend(group.iter().cloned());
        }
    }
    Ok(overlapping)
}
/// Tauri command: returns only the networks advertising the default route.
#[tauri::command]
pub async fn list_exit_nodes(state: State<'_, AppState>) -> Result<Vec<NetworkInfo>, String> {
    let mut nodes = fetch_networks(&state).await?;
    nodes.retain(|net| net.range == "0.0.0.0/0");
    Ok(nodes)
}
/// Shared implementation behind all (de)select commands: builds the request
/// once and dispatches to the matching daemon RPC, labelling errors with `ctx`.
/// Replaces six near-identical command bodies.
async fn change_network_selection(
    state: &State<'_, AppState>,
    select: bool,
    network_i_ds: Vec<String>,
    append: bool,
    all: bool,
    ctx: &str,
) -> Result<(), String> {
    let mut client = state.grpc.get_client().await?;
    let req = proto::SelectNetworksRequest {
        network_i_ds,
        append,
        all,
    };
    // Map both response types to () so the branches unify.
    let outcome = if select {
        client.select_networks(req).await.map(|_| ())
    } else {
        client.deselect_networks(req).await.map(|_| ())
    };
    outcome.map_err(|e| format!("{}: {}", ctx, e))
}

/// Tauri command: adds a single network to the selected set (append semantics).
#[tauri::command]
pub async fn select_network(state: State<'_, AppState>, id: String) -> Result<(), String> {
    change_network_selection(&state, true, vec![id], true, false, "select network").await
}

/// Tauri command: removes a single network from the selected set.
#[tauri::command]
pub async fn deselect_network(state: State<'_, AppState>, id: String) -> Result<(), String> {
    change_network_selection(&state, false, vec![id], false, false, "deselect network").await
}

/// Tauri command: appends several networks to the selected set.
#[tauri::command]
pub async fn select_networks(state: State<'_, AppState>, ids: Vec<String>) -> Result<(), String> {
    change_network_selection(&state, true, ids, true, false, "select networks").await
}

/// Tauri command: removes several networks from the selected set.
#[tauri::command]
pub async fn deselect_networks(
    state: State<'_, AppState>,
    ids: Vec<String>,
) -> Result<(), String> {
    change_network_selection(&state, false, ids, false, false, "deselect networks").await
}

/// Tauri command: selects every known network.
#[tauri::command]
pub async fn select_all_networks(state: State<'_, AppState>) -> Result<(), String> {
    change_network_selection(&state, true, vec![], false, true, "select all networks").await
}

/// Tauri command: deselects every known network.
#[tauri::command]
pub async fn deselect_all_networks(state: State<'_, AppState>) -> Result<(), String> {
    change_network_selection(&state, false, vec![], false, true, "deselect all networks").await
}

View File

@@ -0,0 +1,91 @@
use serde::Serialize;
use tauri::State;
use crate::proto;
use crate::state::AppState;
/// Per-peer details surfaced to the frontend; serialized as camelCase JSON.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct PeerInfo {
    pub ip: String,
    pub pub_key: String,
    pub fqdn: String,
    // Raw daemon connection-status string.
    pub conn_status: String,
    // Formatted timestamp of the last status change (see format_timestamp).
    pub conn_status_update: String,
    // Relay usage as reported by the daemon.
    pub relayed: bool,
    pub relay_address: String,
    // Round-trip latency in fractional milliseconds; 0 when the daemon sent none.
    pub latency_ms: f64,
    // Transfer counters, in bytes.
    pub bytes_rx: i64,
    pub bytes_tx: i64,
    pub rosenpass_enabled: bool,
    // Networks associated with this peer.
    pub networks: Vec<String>,
    // Formatted timestamp of the last WireGuard handshake.
    pub last_handshake: String,
    // ICE candidate types/endpoints for both ends of the connection.
    pub local_ice_type: String,
    pub remote_ice_type: String,
    pub local_endpoint: String,
    pub remote_endpoint: String,
}
/// Formats an optional protobuf timestamp as an RFC 3339 UTC string
/// (e.g. "2024-01-02T03:04:05Z"); returns "" when the timestamp is absent.
///
/// The previous implementation emitted "seconds:nanos" despite claiming
/// "RFC3339-like" output; this produces a real RFC 3339 string.
fn format_timestamp(ts: &Option<prost_types::Timestamp>) -> String {
    match ts {
        Some(t) => rfc3339_from_unix(t.seconds, t.nanos),
        None => String::new(),
    }
}

/// Converts seconds/nanos since the Unix epoch (UTC, per the protobuf
/// well-known Timestamp contract) into an RFC 3339 string. Date math follows
/// Howard Hinnant's `civil_from_days`, so pre-1970 instants are handled too.
fn rfc3339_from_unix(secs: i64, nanos: i32) -> String {
    // Split into whole days and time-of-day; Euclidean ops keep both
    // components non-negative for negative (pre-epoch) inputs.
    let days = secs.div_euclid(86_400);
    let tod = secs.rem_euclid(86_400);
    let (hh, mm, ss) = (tod / 3_600, (tod % 3_600) / 60, tod % 60);
    // civil_from_days: days since 1970-01-01 -> (year, month, day).
    let z = days + 719_468;
    let era = z.div_euclid(146_097);
    let doe = z.rem_euclid(146_097);
    let yoe = (doe - doe / 1_460 + doe / 36_524 - doe / 146_096) / 365;
    let doy = doe - (365 * yoe + yoe / 4 - yoe / 100);
    let mp = (5 * doy + 2) / 153;
    let day = doy - (153 * mp + 2) / 5 + 1;
    let month = if mp < 10 { mp + 3 } else { mp - 9 };
    let year = yoe + era * 400 + i64::from(month <= 2);
    if nanos > 0 {
        format!(
            "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}.{:09}Z",
            year, month, day, hh, mm, ss, nanos
        )
    } else {
        format!("{:04}-{:02}-{:02}T{:02}:{:02}:{:02}Z", year, month, day, hh, mm, ss)
    }
}
#[tauri::command]
pub async fn get_peers(state: State<'_, AppState>) -> Result<Vec<PeerInfo>, String> {
let mut client = state.grpc.get_client().await?;
let resp = client
.status(proto::StatusRequest {
get_full_peer_status: true,
should_run_probes: false,
wait_for_ready: None,
})
.await
.map_err(|e| format!("status rpc: {}", e))?
.into_inner();
let peers = match resp.full_status {
Some(ref full) => &full.peers,
None => return Ok(vec![]),
};
let result: Vec<PeerInfo> = peers
.iter()
.map(|p| {
let latency_ms = p
.latency
.as_ref()
.map(|d| d.seconds as f64 * 1000.0 + d.nanos as f64 / 1_000_000.0)
.unwrap_or(0.0);
PeerInfo {
ip: p.ip.clone(),
pub_key: p.pub_key.clone(),
fqdn: p.fqdn.clone(),
conn_status: p.conn_status.clone(),
conn_status_update: format_timestamp(&p.conn_status_update),
relayed: p.relayed,
relay_address: p.relay_address.clone(),
latency_ms,
bytes_rx: p.bytes_rx,
bytes_tx: p.bytes_tx,
rosenpass_enabled: p.rosenpass_enabled,
networks: p.networks.clone(),
last_handshake: format_timestamp(&p.last_wireguard_handshake),
local_ice_type: p.local_ice_candidate_type.clone(),
remote_ice_type: p.remote_ice_candidate_type.clone(),
local_endpoint: p.local_ice_candidate_endpoint.clone(),
remote_endpoint: p.remote_ice_candidate_endpoint.clone(),
}
})
.collect();
Ok(result)
}

View File

@@ -0,0 +1,135 @@
use serde::Serialize;
use tauri::State;
use crate::proto;
use crate::state::AppState;
/// One entry in the profile list shown to the user; camelCase JSON.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct ProfileInfo {
    pub name: String,
    // True for the profile the daemon currently has active.
    pub is_active: bool,
}
/// Details of the currently active profile; camelCase JSON.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct ActiveProfileInfo {
    pub profile_name: String,
    pub username: String,
    // Always empty today: the daemon response carries no email field.
    pub email: String,
}
/// Best-effort lookup of the OS user running the UI, taken from the
/// conventional environment variables ($USER/$LOGNAME on Unix, %USERNAME%
/// on Windows). The daemon keys profile storage on this name.
fn current_username() -> Result<String, String> {
    #[cfg(unix)]
    {
        match std::env::var("USER") {
            Ok(name) => Ok(name),
            // Some shells/init systems export LOGNAME instead of USER.
            Err(_) => std::env::var("LOGNAME")
                .map_err(|_| "could not determine current user".to_string()),
        }
    }
    #[cfg(windows)]
    {
        match std::env::var("USERNAME") {
            Ok(name) => Ok(name),
            Err(_) => Err("could not determine current user".to_string()),
        }
    }
}
/// Tauri command: lists the daemon-known profiles for the current OS user.
#[tauri::command]
pub async fn list_profiles(state: State<'_, AppState>) -> Result<Vec<ProfileInfo>, String> {
    // The daemon needs the OS username to locate the user's profile store.
    let username = current_username()?;
    let mut client = state.grpc.get_client().await?;
    let resp = client
        .list_profiles(proto::ListProfilesRequest { username })
        .await
        .map_err(|e| format!("list profiles rpc: {}", e))?
        .into_inner();
    let mut profiles = Vec::with_capacity(resp.profiles.len());
    for p in &resp.profiles {
        profiles.push(ProfileInfo {
            name: p.name.clone(),
            is_active: p.is_active,
        });
    }
    Ok(profiles)
}
/// Tauri command: returns the daemon's currently active profile.
#[tauri::command]
pub async fn get_active_profile(state: State<'_, AppState>) -> Result<ActiveProfileInfo, String> {
    let mut client = state.grpc.get_client().await?;
    let resp = client
        .get_active_profile(proto::GetActiveProfileRequest {})
        .await
        .map_err(|e| format!("get active profile rpc: {}", e))?
        .into_inner();
    // The daemon response carries no email; the field stays empty for the UI.
    Ok(ActiveProfileInfo {
        email: String::new(),
        profile_name: resp.profile_name,
        username: resp.username,
    })
}
/// Tauri command: makes `profile_name` the active profile for this user.
#[tauri::command]
pub async fn switch_profile(
    state: State<'_, AppState>,
    profile_name: String,
) -> Result<(), String> {
    let request = proto::SwitchProfileRequest {
        profile_name: Some(profile_name),
        username: Some(current_username()?),
    };
    let mut client = state.grpc.get_client().await?;
    match client.switch_profile(request).await {
        Ok(_) => Ok(()),
        Err(e) => Err(format!("switch profile: {}", e)),
    }
}
/// Tauri command: creates a new profile for the current OS user.
#[tauri::command]
pub async fn add_profile(
    state: State<'_, AppState>,
    profile_name: String,
) -> Result<(), String> {
    let request = proto::AddProfileRequest {
        profile_name,
        username: current_username()?,
    };
    let mut client = state.grpc.get_client().await?;
    match client.add_profile(request).await {
        Ok(_) => Ok(()),
        Err(e) => Err(format!("add profile: {}", e)),
    }
}
/// Tauri command: deletes a profile belonging to the current OS user.
#[tauri::command]
pub async fn remove_profile(
    state: State<'_, AppState>,
    profile_name: String,
) -> Result<(), String> {
    let request = proto::RemoveProfileRequest {
        profile_name,
        username: current_username()?,
    };
    let mut client = state.grpc.get_client().await?;
    match client.remove_profile(request).await {
        Ok(_) => Ok(()),
        Err(e) => Err(format!("remove profile: {}", e)),
    }
}
/// Tauri command: logs the given profile out of the management service.
#[tauri::command]
pub async fn logout(state: State<'_, AppState>, profile_name: String) -> Result<(), String> {
    let request = proto::LogoutRequest {
        profile_name: Some(profile_name),
        username: Some(current_username()?),
    };
    let mut client = state.grpc.get_client().await?;
    match client.logout(request).await {
        Ok(_) => Ok(()),
        Err(e) => Err(format!("logout: {}", e)),
    }
}

View File

@@ -0,0 +1,147 @@
use serde::{Deserialize, Serialize};
use tauri::State;
use crate::proto;
use crate::state::AppState;
/// Mirror of the daemon configuration exchanged with the frontend (camelCase
/// JSON). The `disable_*` fields keep the daemon's negative polarity; the
/// toggle commands invert the UI's `enabled` flag accordingly.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ConfigInfo {
    pub management_url: String,
    pub admin_url: String,
    pub pre_shared_key: String,
    pub interface_name: String,
    pub wireguard_port: i64,
    pub disable_auto_connect: bool,
    pub server_ssh_allowed: bool,
    pub rosenpass_enabled: bool,
    pub rosenpass_permissive: bool,
    pub lazy_connection_enabled: bool,
    pub block_inbound: bool,
    pub disable_notifications: bool,
}
/// Tauri command: reads the daemon configuration. Empty profile/username are
/// sent as-is; presumably they select the active profile — confirm daemon side.
#[tauri::command]
pub async fn get_config(state: State<'_, AppState>) -> Result<ConfigInfo, String> {
    let mut client = state.grpc.get_client().await?;
    let request = proto::GetConfigRequest {
        profile_name: String::new(),
        username: String::new(),
    };
    let c = client
        .get_config(request)
        .await
        .map_err(|e| format!("get config rpc: {}", e))?
        .into_inner();
    Ok(ConfigInfo {
        management_url: c.management_url,
        admin_url: c.admin_url,
        pre_shared_key: c.pre_shared_key,
        interface_name: c.interface_name,
        wireguard_port: c.wireguard_port,
        disable_auto_connect: c.disable_auto_connect,
        server_ssh_allowed: c.server_ssh_allowed,
        rosenpass_enabled: c.rosenpass_enabled,
        rosenpass_permissive: c.rosenpass_permissive,
        lazy_connection_enabled: c.lazy_connection_enabled,
        block_inbound: c.block_inbound,
        disable_notifications: c.disable_notifications,
    })
}
/// Tauri command: writes the given configuration back to the daemon.
/// Empty username/profile_name are sent as-is — presumably targeting the
/// active profile; confirm against the daemon's SetConfig handler.
#[tauri::command]
pub async fn set_config(state: State<'_, AppState>, cfg: ConfigInfo) -> Result<(), String> {
    let mut client = state.grpc.get_client().await?;
    let req = proto::SetConfigRequest {
        username: String::new(),
        profile_name: String::new(),
        management_url: cfg.management_url,
        admin_url: cfg.admin_url,
        // Some(..) means "set this field"; the UI always sets everything it shows.
        rosenpass_enabled: Some(cfg.rosenpass_enabled),
        interface_name: Some(cfg.interface_name),
        wireguard_port: Some(cfg.wireguard_port),
        optional_pre_shared_key: Some(cfg.pre_shared_key),
        disable_auto_connect: Some(cfg.disable_auto_connect),
        server_ssh_allowed: Some(cfg.server_ssh_allowed),
        rosenpass_permissive: Some(cfg.rosenpass_permissive),
        disable_notifications: Some(cfg.disable_notifications),
        lazy_connection_enabled: Some(cfg.lazy_connection_enabled),
        block_inbound: Some(cfg.block_inbound),
        // Fields we don't expose in the UI:
        network_monitor: None,
        disable_client_routes: None,
        disable_server_routes: None,
        disable_dns: None,
        disable_firewall: None,
        block_lan_access: None,
        nat_external_i_ps: vec![],
        clean_nat_external_i_ps: false,
        custom_dns_address: vec![],
        extra_i_face_blacklist: vec![],
        dns_labels: vec![],
        clean_dns_labels: false,
        dns_route_interval: None,
        mtu: None,
        enable_ssh_root: None,
        enable_sshsftp: None,
        enable_ssh_local_port_forwarding: None,
        enable_ssh_remote_port_forwarding: None,
        disable_ssh_auth: None,
        ssh_jwt_cache_ttl: None,
    };
    client
        .set_config(req)
        .await
        .map_err(|e| format!("set config: {}", e))?;
    Ok(())
}
// Toggle helpers - each fetches config, modifies one field, and saves.
#[tauri::command]
pub async fn toggle_ssh(state: State<'_, AppState>, enabled: bool) -> Result<(), String> {
let mut cfg = get_config(state.clone()).await?;
cfg.server_ssh_allowed = enabled;
set_config(state, cfg).await
}
#[tauri::command]
pub async fn toggle_auto_connect(state: State<'_, AppState>, enabled: bool) -> Result<(), String> {
let mut cfg = get_config(state.clone()).await?;
cfg.disable_auto_connect = !enabled;
set_config(state, cfg).await
}
#[tauri::command]
pub async fn toggle_rosenpass(state: State<'_, AppState>, enabled: bool) -> Result<(), String> {
let mut cfg = get_config(state.clone()).await?;
cfg.rosenpass_enabled = enabled;
set_config(state, cfg).await
}
#[tauri::command]
pub async fn toggle_lazy_conn(state: State<'_, AppState>, enabled: bool) -> Result<(), String> {
let mut cfg = get_config(state.clone()).await?;
cfg.lazy_connection_enabled = enabled;
set_config(state, cfg).await
}
#[tauri::command]
pub async fn toggle_block_inbound(
state: State<'_, AppState>,
enabled: bool,
) -> Result<(), String> {
let mut cfg = get_config(state.clone()).await?;
cfg.block_inbound = enabled;
set_config(state, cfg).await
}
#[tauri::command]
pub async fn toggle_notifications(
state: State<'_, AppState>,
enabled: bool,
) -> Result<(), String> {
let mut cfg = get_config(state.clone()).await?;
cfg.disable_notifications = !enabled;
set_config(state, cfg).await
}

View File

@@ -0,0 +1,43 @@
use serde::Serialize;
use tauri::State;
use crate::proto;
use crate::state::AppState;
/// Outcome of the most recent installer run, relayed to the frontend.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct InstallerResult {
    pub success: bool,
    // Daemon-reported error message; empty on success.
    pub error_msg: String,
}
/// Tauri command: placeholder kept for API parity with the Go UI; performs
/// no work today.
#[tauri::command]
pub async fn trigger_update() -> Result<(), String> {
    // Stub - same as the Go implementation
    Ok(())
}
/// Tauri command: fetches the last installer outcome from the daemon.
/// An RPC failure is treated as success because the daemon may have been
/// restarted by the update itself.
#[tauri::command]
pub async fn get_installer_result(state: State<'_, AppState>) -> Result<InstallerResult, String> {
    let mut client = state.grpc.get_client().await?;
    let outcome = client
        .get_installer_result(proto::InstallerResultRequest {})
        .await;
    let result = match outcome {
        Ok(resp) => {
            let inner = resp.into_inner();
            InstallerResult {
                success: inner.success,
                error_msg: inner.error_msg,
            }
        }
        Err(_) => InstallerResult {
            success: true,
            error_msg: String::new(),
        },
    };
    Ok(result)
}

View File

@@ -0,0 +1,91 @@
use std::time::Duration;
use tauri::{AppHandle, Emitter};
use crate::grpc::GrpcClient;
use crate::proto;
/// Spawns a background task that subscribes to daemon events forever,
/// retrying with exponential backoff (1s doubling up to 10s) after each
/// stream error; a clean stream end resets the delay.
pub fn start_event_subscription(app: AppHandle, grpc: GrpcClient) {
    tauri::async_runtime::spawn(async move {
        let max_backoff = Duration::from_secs(10);
        let mut backoff = Duration::from_secs(1);
        loop {
            if let Err(e) = stream_events(&app, &grpc).await {
                log::warn!("event stream ended: {}", e);
            } else {
                // Graceful stream end: start the retry ladder over.
                backoff = Duration::from_secs(1);
            }
            tokio::time::sleep(backoff).await;
            backoff = (backoff * 2).min(max_backoff);
        }
    });
}
/// Opens the daemon's event stream and dispatches every message until the
/// stream closes or errors.
async fn stream_events(app: &AppHandle, grpc: &GrpcClient) -> Result<(), String> {
    let mut client = grpc.get_client().await?;
    let mut stream = client
        .subscribe_events(proto::SubscribeRequest {})
        .await
        .map_err(|e| format!("subscribe events: {}", e))?
        .into_inner();
    log::info!("subscribed to daemon events");
    loop {
        let next = stream
            .message()
            .await
            .map_err(|e| format!("receive event: {}", e))?;
        match next {
            Some(event) => handle_event(app, &event),
            // None means the server closed the stream cleanly.
            None => break,
        }
    }
    log::info!("event stream ended");
    Ok(())
}
/// Shows a desktop notification for user-visible events and always forwards
/// the raw message to the webview as a "daemon-event".
fn handle_event(app: &AppHandle, event: &proto::SystemEvent) {
    // Only events carrying a user-facing message become notifications.
    if !event.user_message.is_empty() {
        let body = match event.metadata.get("id") {
            Some(id) => format!("{} ID: {}", event.user_message, id),
            None => event.user_message.clone(),
        };
        let shown = notify_rust::Notification::new()
            .summary(&get_event_title(event))
            .body(&body)
            .appname("NetBird")
            .show();
        if let Err(e) = shown {
            log::debug!("notification failed: {}", e);
        }
    }
    // Emit to frontend regardless of notification outcome.
    let _ = app.emit("daemon-event", &event.user_message);
}
/// Builds a notification title of the form "Severity: Category",
/// falling back to "Info" / "System" for unmapped values.
fn get_event_title(event: &proto::SystemEvent) -> String {
    use proto::system_event::{Category, Severity};
    let prefix = match Severity::try_from(event.severity) {
        Ok(Severity::Critical) => "Critical",
        Ok(Severity::Error) => "Error",
        Ok(Severity::Warning) => "Warning",
        _ => "Info",
    };
    let category = match Category::try_from(event.category) {
        Ok(Category::Dns) => "DNS",
        Ok(Category::Network) => "Network",
        Ok(Category::Authentication) => "Authentication",
        Ok(Category::Connectivity) => "Connectivity",
        _ => "System",
    };
    format!("{}: {}", prefix, category)
}

View File

@@ -0,0 +1,104 @@
use std::sync::Arc;
use tokio::sync::Mutex;
use tonic::transport::{Channel, Endpoint, Uri};
use crate::proto::daemon_service_client::DaemonServiceClient;
/// GrpcClient manages a persistent gRPC connection to the NetBird daemon.
#[derive(Clone)]
pub struct GrpcClient {
    // Daemon address, e.g. "unix:///var/run/netbird.sock" or "tcp://127.0.0.1:41731".
    addr: String,
    // Lazily created client shared across clones; None until first use or after reset.
    client: Arc<Mutex<Option<DaemonServiceClient<Channel>>>>,
}
impl GrpcClient {
    /// Creates a client for the daemon at `addr` (`unix://…`, `tcp://…`, or a
    /// bare host:port). No connection is made until the first `get_client`.
    pub fn new(addr: String) -> Self {
        Self {
            addr,
            client: Arc::new(Mutex::new(None)),
        }
    }

    /// Returns a cached DaemonServiceClient, creating the connection on first use.
    /// If the connection fails or was previously dropped, a new connection is attempted.
    pub async fn get_client(&self) -> Result<DaemonServiceClient<Channel>, String> {
        let mut guard = self.client.lock().await;
        if let Some(ref client) = *guard {
            // Cloning a tonic client is cheap: clones share the underlying channel.
            return Ok(client.clone());
        }
        let channel = self.connect().await?;
        let client = DaemonServiceClient::new(channel);
        *guard = Some(client.clone());
        log::info!("gRPC connection established to {}", self.addr);
        Ok(client)
    }

    /// Clears the cached client so the next call to get_client will reconnect.
    pub async fn reset(&self) {
        let mut guard = self.client.lock().await;
        *guard = None;
    }

    /// Dispatches to the transport-specific connector based on the addr scheme.
    async fn connect(&self) -> Result<Channel, String> {
        let addr = &self.addr;
        #[cfg(unix)]
        if addr.starts_with("unix://") {
            return self.connect_unix(addr).await;
        }
        // TCP connection
        let target = if addr.starts_with("tcp://") {
            addr.strip_prefix("tcp://").unwrap_or(addr)
        } else {
            addr.as_str()
        };
        // Plaintext HTTP/2 to the local daemon.
        let uri = format!("http://{}", target);
        Endpoint::from_shared(uri)
            .map_err(|e| format!("invalid endpoint: {}", e))?
            .connect()
            .await
            .map_err(|e| format!("connect tcp: {}", e))
    }

    #[cfg(unix)]
    async fn connect_unix(&self, addr: &str) -> Result<Channel, String> {
        let path = addr
            .strip_prefix("unix://")
            .unwrap_or(addr)
            .to_string();
        // tonic requires a valid URI even for UDS; the actual connection
        // is made by the connector below, so the URI authority is ignored.
        let channel = Endpoint::try_from("http://[::]:50051")
            .map_err(|e| format!("invalid endpoint: {}", e))?
            .connect_with_connector(tower::service_fn(move |_: Uri| {
                let path = path.clone();
                async move {
                    let stream = tokio::net::UnixStream::connect(&path).await?;
                    Ok::<_, std::io::Error>(hyper_util::rt::TokioIo::new(stream))
                }
            }))
            .await
            .map_err(|e| format!("connect unix: {}", e))?;
        Ok(channel)
    }

    /// Close the connection (drop the cached client).
    /// Identical semantics to `reset`; previously a copy-pasted duplicate,
    /// now delegates so the behavior cannot drift.
    pub async fn close(&self) {
        self.reset().await;
    }
}
/// Returns the default daemon address for the current platform:
/// a localhost TCP port on Windows, a Unix socket everywhere else.
pub fn default_daemon_addr() -> String {
    let addr = if cfg!(windows) {
        "tcp://127.0.0.1:41731"
    } else {
        "unix:///var/run/netbird.sock"
    };
    addr.to_string()
}

View File

@@ -0,0 +1,106 @@
// Prevents additional console window on Windows in release
#![cfg_attr(not(debug_assertions), windows_subsystem = "windows")]
mod commands;
mod events;
mod grpc;
mod proto;
mod state;
mod tray;
use tauri::Manager;
use grpc::{default_daemon_addr, GrpcClient};
use state::AppState;
/// UI entry point: wires logging, the shared gRPC client, the Tauri command
/// surface, the system tray, and the daemon event subscription.
fn main() {
    env_logger::init();
    // Linux WebKit workaround
    #[cfg(target_os = "linux")]
    {
        std::env::set_var("WEBKIT_DISABLE_DMABUF_RENDERER", "1");
    }
    // The daemon address can be overridden (e.g. for development) via env var.
    let daemon_addr =
        std::env::var("NETBIRD_DAEMON_ADDR").unwrap_or_else(|_| default_daemon_addr());
    log::info!("NetBird UI starting, daemon address: {}", daemon_addr);
    let grpc_client = GrpcClient::new(daemon_addr.clone());
    tauri::Builder::default()
        .plugin(tauri_plugin_single_instance::init(|app, _args, _cwd| {
            // Focus existing window when second instance is launched
            if let Some(win) = app.get_webview_window("main") {
                let _ = win.show();
                let _ = win.set_focus();
            }
        }))
        // Shared state handed to every #[tauri::command] via State<AppState>.
        .manage(AppState {
            grpc: grpc_client.clone(),
        })
        .invoke_handler(tauri::generate_handler![
            // Connection
            commands::connection::get_status,
            commands::connection::connect,
            commands::connection::disconnect,
            // Settings
            commands::settings::get_config,
            commands::settings::set_config,
            commands::settings::toggle_ssh,
            commands::settings::toggle_auto_connect,
            commands::settings::toggle_rosenpass,
            commands::settings::toggle_lazy_conn,
            commands::settings::toggle_block_inbound,
            commands::settings::toggle_notifications,
            // Network
            commands::network::list_networks,
            commands::network::list_overlapping_networks,
            commands::network::list_exit_nodes,
            commands::network::select_network,
            commands::network::deselect_network,
            commands::network::select_networks,
            commands::network::deselect_networks,
            commands::network::select_all_networks,
            commands::network::deselect_all_networks,
            // Peers
            commands::peers::get_peers,
            // Profile
            commands::profile::list_profiles,
            commands::profile::get_active_profile,
            commands::profile::switch_profile,
            commands::profile::add_profile,
            commands::profile::remove_profile,
            commands::profile::logout,
            // Debug
            commands::debug::create_debug_bundle,
            commands::debug::get_log_level,
            commands::debug::set_log_level,
            // Update
            commands::update::trigger_update,
            commands::update::get_installer_result,
        ])
        .setup(|app| {
            let handle = app.handle().clone();
            // Setup system tray
            if let Err(e) = tray::setup_tray(&handle) {
                log::error!("tray setup failed: {}", e);
            }
            // Start daemon event subscription
            events::start_event_subscription(handle, grpc_client);
            Ok(())
        })
        .on_window_event(|window, event| {
            // Hide instead of quit when user closes the window
            if let tauri::WindowEvent::CloseRequested { api, .. } = event {
                api.prevent_close();
                let _ = window.hide();
            }
        })
        .run(tauri::generate_context!())
        .expect("error running tauri application");
}

View File

@@ -0,0 +1 @@
// Pulls in the Rust types tonic-build generated from proto/daemon.proto
// (protobuf package "daemon"): DaemonServiceClient plus request/response types.
tonic::include_proto!("daemon");

View File

@@ -0,0 +1,6 @@
use crate::grpc::GrpcClient;
/// Application state shared across all Tauri commands.
pub struct AppState {
    // Shared gRPC handle to the local NetBird daemon (cheap to clone).
    pub grpc: GrpcClient,
}

View File

@@ -0,0 +1,420 @@
use std::sync::Arc;
use std::time::Duration;
use tauri::image::Image;
use tauri::menu::{CheckMenuItem, CheckMenuItemBuilder, MenuBuilder, MenuItem, MenuItemBuilder, SubmenuBuilder};
use tauri::tray::TrayIconBuilder;
use tauri::{AppHandle, Emitter, Manager};
use tokio::sync::Mutex;
use crate::commands::connection::StatusInfo;
use crate::grpc::GrpcClient;
use crate::proto;
use crate::state::AppState;
const STATUS_POLL_INTERVAL: Duration = Duration::from_secs(5);
// Icon bytes embedded at compile time
const ICON_DISCONNECTED: &[u8] = include_bytes!("../icons/netbird-systemtray-disconnected.png");
const ICON_CONNECTED: &[u8] = include_bytes!("../icons/netbird-systemtray-connected.png");
const ICON_CONNECTING: &[u8] = include_bytes!("../icons/netbird-systemtray-connecting.png");
const ICON_ERROR: &[u8] = include_bytes!("../icons/netbird-systemtray-error.png");
/// Map a daemon status string to the matching embedded tray icon bytes.
///
/// An empty status is treated like "Disconnected"; any unrecognized
/// status falls back to the error icon.
fn icon_for_status(status: &str) -> &'static [u8] {
    if status == "Connected" {
        ICON_CONNECTED
    } else if status == "Connecting" {
        ICON_CONNECTING
    } else if status.is_empty() || status == "Disconnected" {
        ICON_DISCONNECTED
    } else {
        ICON_ERROR
    }
}
/// Holds references to menu items we need to update at runtime.
pub struct TrayMenuItems {
    /// Read-only "Status: …" label; text rewritten by `poll_status()`.
    pub status_item: MenuItem<tauri::Wry>,
    /// Check items mirroring daemon config flags; check marks are kept
    /// in sync by `refresh_toggle_states()`.
    pub ssh_item: CheckMenuItem<tauri::Wry>,
    pub auto_connect_item: CheckMenuItem<tauri::Wry>,
    pub rosenpass_item: CheckMenuItem<tauri::Wry>,
    pub lazy_conn_item: CheckMenuItem<tauri::Wry>,
    pub block_inbound_item: CheckMenuItem<tauri::Wry>,
    pub notifications_item: CheckMenuItem<tauri::Wry>,
}
/// Shared handle to the tray menu items, guarded by an async mutex.
/// `None` until `setup_tray()` has built the menu.
pub type SharedTrayMenuItems = Arc<Mutex<Option<TrayMenuItems>>>;
/// Build and register the system tray icon with its full menu.
///
/// The menu contains: a read-only status label, connect/disconnect
/// actions, check items for daemon config flags, an exit-node submenu
/// placeholder, navigation shortcuts that open the main window on a
/// given route, and quit. The mutable items are stored in managed state
/// (`SharedTrayMenuItems`) so background tasks can update them. Finally
/// this spawns an initial toggle-state refresh and the periodic status
/// poller.
///
/// # Errors
/// Returns any error from menu/tray construction or icon decoding.
pub fn setup_tray(app: &AppHandle) -> Result<(), Box<dyn std::error::Error>> {
    let grpc = app.state::<AppState>().grpc.clone();
    // Build the tray menu
    // Disabled so it renders as a plain label; poll_status() rewrites it.
    let status_item = MenuItemBuilder::with_id("status", "Status: Disconnected")
        .enabled(false)
        .build(app)?;
    let connect_item = MenuItemBuilder::with_id("connect", "Connect").build(app)?;
    let disconnect_item = MenuItemBuilder::with_id("disconnect", "Disconnect").build(app)?;
    // Toggle check marks start from static defaults; the spawned
    // refresh_toggle_states() call below syncs them with the daemon.
    let ssh_item = CheckMenuItemBuilder::with_id("toggle_ssh", "Allow SSH connections")
        .checked(false)
        .build(app)?;
    let auto_connect_item =
        CheckMenuItemBuilder::with_id("toggle_auto_connect", "Connect automatically when service starts")
            .checked(false)
            .build(app)?;
    let rosenpass_item =
        CheckMenuItemBuilder::with_id("toggle_rosenpass", "Enable post-quantum security via Rosenpass")
            .checked(false)
            .build(app)?;
    let lazy_conn_item =
        CheckMenuItemBuilder::with_id("toggle_lazy_conn", "[Experimental] Enable lazy connections")
            .checked(false)
            .build(app)?;
    let block_inbound_item =
        CheckMenuItemBuilder::with_id("toggle_block_inbound", "Block inbound connections")
            .checked(false)
            .build(app)?;
    let notifications_item =
        CheckMenuItemBuilder::with_id("toggle_notifications", "Enable notifications")
            .checked(true)
            .build(app)?;
    // Exit node submenu
    // NOTE(review): static placeholder only — nothing in this function
    // populates real exit nodes; presumably handled elsewhere. Confirm.
    let exit_node_menu = SubmenuBuilder::with_id(app, "exit_node", "Exit Node")
        .item(
            &MenuItemBuilder::with_id("no_exit_nodes", "No exit nodes")
                .enabled(false)
                .build(app)?,
        )
        .build()?;
    // Navigation items
    let nav_status = MenuItemBuilder::with_id("nav_status", "Status").build(app)?;
    let nav_settings = MenuItemBuilder::with_id("nav_settings", "Settings").build(app)?;
    let nav_peers = MenuItemBuilder::with_id("nav_peers", "Peers").build(app)?;
    let nav_networks = MenuItemBuilder::with_id("nav_networks", "Networks").build(app)?;
    let nav_profiles = MenuItemBuilder::with_id("nav_profiles", "Profiles").build(app)?;
    let nav_debug = MenuItemBuilder::with_id("nav_debug", "Debug").build(app)?;
    let nav_update = MenuItemBuilder::with_id("nav_update", "Update").build(app)?;
    let quit_item = MenuItemBuilder::with_id("quit", "Quit").build(app)?;
    let menu = MenuBuilder::new(app)
        .item(&status_item)
        .separator()
        .item(&connect_item)
        .item(&disconnect_item)
        .separator()
        .item(&ssh_item)
        .item(&auto_connect_item)
        .item(&rosenpass_item)
        .item(&lazy_conn_item)
        .item(&block_inbound_item)
        .item(&notifications_item)
        .separator()
        .item(&exit_node_menu)
        .separator()
        .item(&nav_status)
        .item(&nav_settings)
        .item(&nav_peers)
        .item(&nav_networks)
        .item(&nav_profiles)
        .item(&nav_debug)
        .item(&nav_update)
        .separator()
        .item(&quit_item)
        .build()?;
    // Store menu item references for runtime updates
    let menu_items: SharedTrayMenuItems = Arc::new(Mutex::new(Some(TrayMenuItems {
        status_item,
        ssh_item: ssh_item.clone(),
        auto_connect_item: auto_connect_item.clone(),
        rosenpass_item: rosenpass_item.clone(),
        lazy_conn_item: lazy_conn_item.clone(),
        block_inbound_item: block_inbound_item.clone(),
        notifications_item: notifications_item.clone(),
    })));
    app.manage(menu_items.clone());
    let _tray = TrayIconBuilder::with_id("main")
        .icon(Image::from_bytes(ICON_DISCONNECTED)?)
        // Template icons let macOS tint the tray image to match the menu bar.
        .icon_as_template(cfg!(target_os = "macos"))
        .menu(&menu)
        .on_menu_event({
            let app_handle = app.clone();
            let grpc = grpc.clone();
            move |_app, event| {
                let id = event.id().as_ref();
                // Clone per event so each spawned task owns its handles.
                let app_handle = app_handle.clone();
                let grpc = grpc.clone();
                match id {
                    // Fire-and-forget Up RPC; failures are only logged.
                    "connect" => {
                        tauri::async_runtime::spawn(async move {
                            let mut client = match grpc.get_client().await {
                                Ok(c) => c,
                                Err(e) => {
                                    log::error!("connect: {}", e);
                                    return;
                                }
                            };
                            if let Err(e) = client
                                .up(proto::UpRequest {
                                    profile_name: None,
                                    username: None,
                                    auto_update: None,
                                })
                                .await
                            {
                                log::error!("connect: {}", e);
                            }
                        });
                    }
                    "disconnect" => {
                        tauri::async_runtime::spawn(async move {
                            let mut client = match grpc.get_client().await {
                                Ok(c) => c,
                                Err(e) => {
                                    log::error!("disconnect: {}", e);
                                    return;
                                }
                            };
                            if let Err(e) = client.down(proto::DownRequest {}).await {
                                log::error!("disconnect: {}", e);
                            }
                        });
                    }
                    // All config toggles funnel through handle_toggle().
                    "toggle_ssh" | "toggle_auto_connect" | "toggle_rosenpass"
                    | "toggle_lazy_conn" | "toggle_block_inbound" | "toggle_notifications" => {
                        let toggle_id = id.to_string();
                        tauri::async_runtime::spawn(async move {
                            handle_toggle(&app_handle, &grpc, &toggle_id).await;
                        });
                    }
                    // Navigation: tell the frontend to route, then raise the window.
                    s if s.starts_with("nav_") => {
                        let path = match s {
                            "nav_status" => "/",
                            "nav_settings" => "/settings",
                            "nav_peers" => "/peers",
                            "nav_networks" => "/networks",
                            "nav_profiles" => "/profiles",
                            "nav_debug" => "/debug",
                            "nav_update" => "/update",
                            _ => return,
                        };
                        let _ = app_handle.emit("navigate", path);
                        if let Some(win) = app_handle.get_webview_window("main") {
                            let _ = win.show();
                            let _ = win.set_focus();
                        }
                    }
                    "quit" => {
                        app_handle.exit(0);
                    }
                    _ => {}
                }
            }
        })
        .build(app)?;
    // Refresh toggle states
    let app_handle = app.clone();
    let grpc_clone = grpc.clone();
    tauri::async_runtime::spawn(async move {
        refresh_toggle_states(&app_handle, &grpc_clone).await;
    });
    // Start status polling
    let app_handle = app.clone();
    tauri::async_runtime::spawn(async move {
        poll_status(app_handle, grpc).await;
    });
    Ok(())
}
/// Background loop keeping the tray in sync with the daemon; never returns.
///
/// Every `STATUS_POLL_INTERVAL` it calls the daemon Status RPC, rewrites
/// the tray status label, swaps the tray icon to match the connection
/// state, and emits a `status-changed` event to the webview. On any RPC
/// failure the cached channel is reset so the next iteration redials.
async fn poll_status(app: AppHandle, grpc: GrpcClient) {
    loop {
        tokio::time::sleep(STATUS_POLL_INTERVAL).await;
        let mut client = match grpc.get_client().await {
            Ok(c) => c,
            Err(e) => {
                log::warn!("pollStatus: {}", e);
                // Drop the stale channel; get_client() redials next tick.
                grpc.reset().await;
                continue;
            }
        };
        let resp = match client
            .status(proto::StatusRequest {
                get_full_peer_status: true,
                should_run_probes: false,
                wait_for_ready: None,
            })
            .await
        {
            Ok(r) => r.into_inner(),
            Err(e) => {
                log::warn!("pollStatus: status rpc: {}", e);
                grpc.reset().await;
                continue;
            }
        };
        let mut info = StatusInfo {
            status: resp.status.clone(),
            ip: String::new(),
            public_key: String::new(),
            fqdn: String::new(),
            connected_peers: 0,
        };
        // Enrich with local peer details when the daemon sent full status.
        if let Some(ref full) = resp.full_status {
            if let Some(ref lp) = full.local_peer_state {
                info.ip = lp.ip.clone();
                info.public_key = lp.pub_key.clone();
                info.fqdn = lp.fqdn.clone();
            }
            info.connected_peers = full.peers.len();
        }
        // Update tray label: append the IP only when we have one.
        let label = if info.ip.is_empty() {
            format!("Status: {}", info.status)
        } else {
            format!("Status: {} ({})", info.status, info.ip)
        };
        // Update tray menu status label via stored reference
        let menu_items = app.state::<SharedTrayMenuItems>();
        if let Some(ref items) = *menu_items.lock().await {
            let _ = items.status_item.set_text(&label);
        }
        // Update tray icon
        if let Some(tray) = app.tray_by_id("main") {
            let icon_bytes = icon_for_status(&info.status);
            if let Ok(icon) = Image::from_bytes(icon_bytes) {
                let _ = tray.set_icon(Some(icon));
            }
        }
        // Emit status-changed event to frontend
        let _ = app.emit("status-changed", &info);
    }
}
/// Flip one boolean daemon setting in response to a tray check item click.
///
/// Fetches the current config, copies it field-for-field into a
/// `SetConfigRequest`, inverts only the flag matching `toggle_id`, sends
/// the request, then re-syncs all check marks from the daemon so the tray
/// shows the actual resulting state even if the RPC failed.
async fn handle_toggle(app: &AppHandle, grpc: &GrpcClient, toggle_id: &str) {
    let mut client = match grpc.get_client().await {
        Ok(c) => c,
        Err(e) => {
            log::error!("toggle: get client: {}", e);
            return;
        }
    };
    // Get current config
    // Empty profile/username assumed to mean "the active profile" —
    // TODO confirm against the daemon API.
    let cfg = match client
        .get_config(proto::GetConfigRequest {
            profile_name: String::new(),
            username: String::new(),
        })
        .await
    {
        Ok(r) => r.into_inner(),
        Err(e) => {
            log::error!("toggle: get config: {}", e);
            return;
        }
    };
    // Build set config request based on which toggle was clicked
    // Fields the tray never changes are echoed back from the fetched
    // config or left None/empty (presumably "keep current" — verify).
    let mut req = proto::SetConfigRequest {
        username: String::new(),
        profile_name: String::new(),
        management_url: cfg.management_url,
        admin_url: cfg.admin_url,
        rosenpass_enabled: Some(cfg.rosenpass_enabled),
        interface_name: Some(cfg.interface_name),
        wireguard_port: Some(cfg.wireguard_port),
        // NOTE(review): echoes cfg.pre_shared_key back verbatim — assumes
        // GetConfig returns the real key, not a masked value. Confirm.
        optional_pre_shared_key: Some(cfg.pre_shared_key),
        disable_auto_connect: Some(cfg.disable_auto_connect),
        server_ssh_allowed: Some(cfg.server_ssh_allowed),
        rosenpass_permissive: Some(cfg.rosenpass_permissive),
        disable_notifications: Some(cfg.disable_notifications),
        lazy_connection_enabled: Some(cfg.lazy_connection_enabled),
        block_inbound: Some(cfg.block_inbound),
        network_monitor: None,
        disable_client_routes: None,
        disable_server_routes: None,
        disable_dns: None,
        disable_firewall: None,
        block_lan_access: None,
        nat_external_i_ps: vec![],
        clean_nat_external_i_ps: false,
        custom_dns_address: vec![],
        extra_i_face_blacklist: vec![],
        dns_labels: vec![],
        clean_dns_labels: false,
        dns_route_interval: None,
        mtu: None,
        enable_ssh_root: None,
        enable_sshsftp: None,
        enable_ssh_local_port_forwarding: None,
        enable_ssh_remote_port_forwarding: None,
        disable_ssh_auth: None,
        ssh_jwt_cache_ttl: None,
    };
    // Invert exactly the flag the user clicked. Positively-worded menu
    // labels (auto-connect, notifications) map to disable_* fields, so
    // the inversion here is of the stored flag, not the label state.
    match toggle_id {
        "toggle_ssh" => req.server_ssh_allowed = Some(!cfg.server_ssh_allowed),
        "toggle_auto_connect" => req.disable_auto_connect = Some(!cfg.disable_auto_connect),
        "toggle_rosenpass" => req.rosenpass_enabled = Some(!cfg.rosenpass_enabled),
        "toggle_lazy_conn" => req.lazy_connection_enabled = Some(!cfg.lazy_connection_enabled),
        "toggle_block_inbound" => req.block_inbound = Some(!cfg.block_inbound),
        "toggle_notifications" => req.disable_notifications = Some(!cfg.disable_notifications),
        _ => return,
    }
    if let Err(e) = client.set_config(req).await {
        log::error!("toggle {}: set config: {}", toggle_id, e);
    }
    // Refresh toggle states after change
    refresh_toggle_states(app, grpc).await;
}
/// Sync every tray check item with the daemon's current config.
///
/// Failures are logged at debug level only — the daemon may simply not
/// be running yet, and a later toggle or refresh will catch up.
async fn refresh_toggle_states(app: &AppHandle, grpc: &GrpcClient) {
    let mut client = match grpc.get_client().await {
        Ok(c) => c,
        Err(e) => {
            log::debug!("refresh toggles: {}", e);
            return;
        }
    };
    let cfg = match client
        .get_config(proto::GetConfigRequest {
            profile_name: String::new(),
            username: String::new(),
        })
        .await
    {
        Ok(r) => r.into_inner(),
        Err(e) => {
            log::debug!("refresh toggles: get config: {}", e);
            return;
        }
    };
    let menu_items = app.state::<SharedTrayMenuItems>();
    let guard = menu_items.lock().await;
    if let Some(ref items) = *guard {
        let _ = items.ssh_item.set_checked(cfg.server_ssh_allowed);
        // auto-connect and notifications are stored inverted (disable_*),
        // so negate them for the positively-worded menu labels.
        let _ = items.auto_connect_item.set_checked(!cfg.disable_auto_connect);
        let _ = items.rosenpass_item.set_checked(cfg.rosenpass_enabled);
        let _ = items.lazy_conn_item.set_checked(cfg.lazy_connection_enabled);
        let _ = items.block_inbound_item.set_checked(cfg.block_inbound);
        let _ = items.notifications_item.set_checked(!cfg.disable_notifications);
    }
}

View File

@@ -0,0 +1,36 @@
{
"$schema": "https://schema.tauri.app/config/2",
"productName": "NetBird",
"identifier": "io.netbird.client",
"version": "0.1.0",
"build": {
"frontendDist": "../frontend/dist",
"beforeBuildCommand": "cd ../frontend && npm run build"
},
"app": {
"windows": [
{
"title": "NetBird",
"width": 900,
"height": 650,
"visible": false,
"resizable": true,
"skipTaskbar": true
}
],
"trayIcon": {
"iconPath": "icons/netbird-systemtray-disconnected.png",
"iconAsTemplate": true
},
"security": {
"csp": null
}
},
"bundle": {
"active": true,
"targets": "all",
"icon": [
"icons/netbird.png"
]
}
}