mirror of
https://github.com/netbirdio/netbird.git
synced 2026-05-14 04:39:54 +00:00
[client/ui] Replace fyne UI with Wails (rename ui-wails to ui)
Removes the legacy fyne-based client/ui implementation and renames the Wails replacement (client/ui-wails) to take its place at client/ui. Go imports, frontend bindings, CI workflows, goreleaser configs and the windows .syso icon path are updated to follow the rename.
This commit is contained in:
13
client/ui/services/conn.go
Normal file
13
client/ui/services/conn.go
Normal file
@@ -0,0 +1,13 @@
|
||||
//go:build !android && !ios && !freebsd && !js
|
||||
|
||||
package services
|
||||
|
||||
import "github.com/netbirdio/netbird/client/proto"
|
||||
|
||||
// DaemonConn returns a lazy gRPC client to the NetBird daemon.
|
||||
// All services receive a DaemonConn so they share a single connection.
|
||||
type DaemonConn interface {
|
||||
Client() (proto.DaemonServiceClient, error)
|
||||
}
|
||||
|
||||
// ptrStr returns a pointer to a copy of s; used for optional proto fields.
func ptrStr(s string) *string {
	v := s
	return &v
}
|
||||
205
client/ui/services/connection.go
Normal file
205
client/ui/services/connection.go
Normal file
@@ -0,0 +1,205 @@
|
||||
//go:build !android && !ios && !freebsd && !js
|
||||
|
||||
package services
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/user"
|
||||
"runtime"
|
||||
|
||||
"github.com/netbirdio/netbird/client/proto"
|
||||
)
|
||||
|
||||
// LoginParams carries the fields the UI sets when starting a login.
|
||||
type LoginParams struct {
|
||||
ProfileName string `json:"profileName"`
|
||||
Username string `json:"username"`
|
||||
ManagementURL string `json:"managementUrl"`
|
||||
SetupKey string `json:"setupKey"`
|
||||
PreSharedKey string `json:"preSharedKey"`
|
||||
Hostname string `json:"hostname"`
|
||||
Hint string `json:"hint"`
|
||||
}
|
||||
|
||||
// LoginResult is the daemon's reply to a Login call.
|
||||
type LoginResult struct {
|
||||
NeedsSSOLogin bool `json:"needsSsoLogin"`
|
||||
UserCode string `json:"userCode"`
|
||||
VerificationURI string `json:"verificationUri"`
|
||||
VerificationURIComplete string `json:"verificationUriComplete"`
|
||||
}
|
||||
|
||||
// WaitSSOParams carries the fields the UI passes to WaitSSOLogin.
|
||||
type WaitSSOParams struct {
|
||||
UserCode string `json:"userCode"`
|
||||
Hostname string `json:"hostname"`
|
||||
}
|
||||
|
||||
// UpParams selects the profile the daemon should bring up.
|
||||
type UpParams struct {
|
||||
ProfileName string `json:"profileName"`
|
||||
Username string `json:"username"`
|
||||
}
|
||||
|
||||
// LogoutParams selects the profile the daemon should log out.
|
||||
type LogoutParams struct {
|
||||
ProfileName string `json:"profileName"`
|
||||
Username string `json:"username"`
|
||||
}
|
||||
|
||||
// Connection groups the daemon RPCs that drive login / connect / disconnect.
|
||||
type Connection struct {
|
||||
conn DaemonConn
|
||||
}
|
||||
|
||||
func NewConnection(conn DaemonConn) *Connection {
|
||||
return &Connection{conn: conn}
|
||||
}
|
||||
|
||||
func (s *Connection) Login(ctx context.Context, p LoginParams) (LoginResult, error) {
|
||||
cli, err := s.conn.Client()
|
||||
if err != nil {
|
||||
return LoginResult{}, err
|
||||
}
|
||||
|
||||
// Reset the daemon's connection loop before kicking off a new login.
|
||||
// If a previous Login left a WaitSSOLogin pending (user closed the
|
||||
// browser without completing the flow), the daemon stays parked on the
|
||||
// old UserCode and replies with "invalid setup-key or no sso information
|
||||
// provided" to a fresh Login. Calling Down first dislodges that state;
|
||||
// we ignore the error since Down on an already-idle daemon is a no-op.
|
||||
if _, derr := cli.Down(ctx, &proto.DownRequest{}); derr != nil {
|
||||
// Down failed — likely because the daemon is already idle. Continue.
|
||||
_ = derr
|
||||
}
|
||||
|
||||
// Mirror the Fyne client's defaulting: when the frontend doesn't supply
|
||||
// profile / username, fall back to the daemon's active profile and the
|
||||
// current OS user. The flag matches the Fyne ui's IsUnixDesktopClient
|
||||
// condition so the daemon knows we can render an SSO browser flow.
|
||||
profileName := p.ProfileName
|
||||
username := p.Username
|
||||
if profileName == "" {
|
||||
if active, aerr := cli.GetActiveProfile(ctx, &proto.GetActiveProfileRequest{}); aerr == nil {
|
||||
profileName = active.GetProfileName()
|
||||
if username == "" {
|
||||
username = active.GetUsername()
|
||||
}
|
||||
}
|
||||
}
|
||||
if username == "" {
|
||||
if u, uerr := user.Current(); uerr == nil {
|
||||
username = u.Username
|
||||
}
|
||||
}
|
||||
|
||||
req := &proto.LoginRequest{
|
||||
ManagementUrl: p.ManagementURL,
|
||||
SetupKey: p.SetupKey,
|
||||
Hostname: p.Hostname,
|
||||
IsUnixDesktopClient: runtime.GOOS == "linux",
|
||||
}
|
||||
if profileName != "" {
|
||||
req.ProfileName = ptrStr(profileName)
|
||||
}
|
||||
if username != "" {
|
||||
req.Username = ptrStr(username)
|
||||
}
|
||||
if p.PreSharedKey != "" {
|
||||
req.OptionalPreSharedKey = ptrStr(p.PreSharedKey)
|
||||
}
|
||||
if p.Hint != "" {
|
||||
req.Hint = ptrStr(p.Hint)
|
||||
}
|
||||
|
||||
resp, err := cli.Login(ctx, req)
|
||||
if err != nil {
|
||||
return LoginResult{}, err
|
||||
}
|
||||
return LoginResult{
|
||||
NeedsSSOLogin: resp.GetNeedsSSOLogin(),
|
||||
UserCode: resp.GetUserCode(),
|
||||
VerificationURI: resp.GetVerificationURI(),
|
||||
VerificationURIComplete: resp.GetVerificationURIComplete(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *Connection) WaitSSOLogin(ctx context.Context, p WaitSSOParams) (string, error) {
|
||||
cli, err := s.conn.Client()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
resp, err := cli.WaitSSOLogin(ctx, &proto.WaitSSOLoginRequest{
|
||||
UserCode: p.UserCode,
|
||||
Hostname: p.Hostname,
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return resp.GetEmail(), nil
|
||||
}
|
||||
|
||||
func (s *Connection) Up(ctx context.Context, p UpParams) error {
|
||||
cli, err := s.conn.Client()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req := &proto.UpRequest{}
|
||||
if p.ProfileName != "" {
|
||||
req.ProfileName = ptrStr(p.ProfileName)
|
||||
}
|
||||
if p.Username != "" {
|
||||
req.Username = ptrStr(p.Username)
|
||||
}
|
||||
_, err = cli.Up(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *Connection) Down(ctx context.Context) error {
|
||||
cli, err := s.conn.Client()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = cli.Down(ctx, &proto.DownRequest{})
|
||||
return err
|
||||
}
|
||||
|
||||
// OpenURL launches the user's preferred browser to display url. Mirrors the
|
||||
// Fyne client's openURL helper so the SSO flow can pop the verification page
|
||||
// the same way as the legacy UI — WebKitGTK's window.open is blocked by the
|
||||
// embedded webview, and asking the user to copy/paste defeats the point of
|
||||
// SSO. Honors $BROWSER first, then falls back to the platform default.
|
||||
func (s *Connection) OpenURL(url string) error {
|
||||
if browser := os.Getenv("BROWSER"); browser != "" {
|
||||
return exec.Command(browser, url).Start()
|
||||
}
|
||||
switch runtime.GOOS {
|
||||
case "windows":
|
||||
return exec.Command("rundll32", "url.dll,FileProtocolHandler", url).Start()
|
||||
case "darwin":
|
||||
return exec.Command("open", url).Start()
|
||||
case "linux":
|
||||
return exec.Command("xdg-open", url).Start()
|
||||
default:
|
||||
return fmt.Errorf("unsupported platform")
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Connection) Logout(ctx context.Context, p LogoutParams) error {
|
||||
cli, err := s.conn.Client()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req := &proto.LogoutRequest{}
|
||||
if p.ProfileName != "" {
|
||||
req.ProfileName = ptrStr(p.ProfileName)
|
||||
}
|
||||
if p.Username != "" {
|
||||
req.Username = ptrStr(p.Username)
|
||||
}
|
||||
_, err = cli.Logout(ctx, req)
|
||||
return err
|
||||
}
|
||||
88
client/ui/services/debug.go
Normal file
88
client/ui/services/debug.go
Normal file
@@ -0,0 +1,88 @@
|
||||
//go:build !android && !ios && !freebsd && !js
|
||||
|
||||
package services
|
||||
|
||||
import (
	"context"
	"strings"

	"github.com/netbirdio/netbird/client/proto"
)
|
||||
|
||||
// DebugBundleParams configures what the daemon collects when generating a
|
||||
// debug bundle.
|
||||
type DebugBundleParams struct {
|
||||
Anonymize bool `json:"anonymize"`
|
||||
SystemInfo bool `json:"systemInfo"`
|
||||
UploadURL string `json:"uploadUrl"`
|
||||
LogFileCount uint32 `json:"logFileCount"`
|
||||
}
|
||||
|
||||
// DebugBundleResult mirrors DebugBundleResponse — Path is set on local-only
|
||||
// bundles, UploadedKey on successful uploads, UploadFailureReason on failed
|
||||
// uploads.
|
||||
type DebugBundleResult struct {
|
||||
Path string `json:"path"`
|
||||
UploadedKey string `json:"uploadedKey"`
|
||||
UploadFailureReason string `json:"uploadFailureReason"`
|
||||
}
|
||||
|
||||
// LogLevel is a single log-level value the daemon understands ("error",
|
||||
// "warn", "info", "debug", "trace").
|
||||
type LogLevel struct {
|
||||
Level string `json:"level"`
|
||||
}
|
||||
|
||||
// Debug groups debug / log-level / packet-trace RPCs.
|
||||
type Debug struct {
|
||||
conn DaemonConn
|
||||
}
|
||||
|
||||
func NewDebug(conn DaemonConn) *Debug {
|
||||
return &Debug{conn: conn}
|
||||
}
|
||||
|
||||
func (s *Debug) Bundle(ctx context.Context, p DebugBundleParams) (DebugBundleResult, error) {
|
||||
cli, err := s.conn.Client()
|
||||
if err != nil {
|
||||
return DebugBundleResult{}, err
|
||||
}
|
||||
resp, err := cli.DebugBundle(ctx, &proto.DebugBundleRequest{
|
||||
Anonymize: p.Anonymize,
|
||||
SystemInfo: p.SystemInfo,
|
||||
UploadURL: p.UploadURL,
|
||||
LogFileCount: p.LogFileCount,
|
||||
})
|
||||
if err != nil {
|
||||
return DebugBundleResult{}, err
|
||||
}
|
||||
return DebugBundleResult{
|
||||
Path: resp.GetPath(),
|
||||
UploadedKey: resp.GetUploadedKey(),
|
||||
UploadFailureReason: resp.GetUploadFailureReason(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *Debug) GetLogLevel(ctx context.Context) (LogLevel, error) {
|
||||
cli, err := s.conn.Client()
|
||||
if err != nil {
|
||||
return LogLevel{}, err
|
||||
}
|
||||
resp, err := cli.GetLogLevel(ctx, &proto.GetLogLevelRequest{})
|
||||
if err != nil {
|
||||
return LogLevel{}, err
|
||||
}
|
||||
return LogLevel{Level: resp.GetLevel().String()}, nil
|
||||
}
|
||||
|
||||
func (s *Debug) SetLogLevel(ctx context.Context, lvl LogLevel) error {
|
||||
cli, err := s.conn.Client()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
level, ok := proto.LogLevel_value[lvl.Level]
|
||||
if !ok {
|
||||
level = int32(proto.LogLevel_INFO)
|
||||
}
|
||||
_, err = cli.SetLogLevel(ctx, &proto.SetLogLevelRequest{Level: proto.LogLevel(level)})
|
||||
return err
|
||||
}
|
||||
87
client/ui/services/forwarding.go
Normal file
87
client/ui/services/forwarding.go
Normal file
@@ -0,0 +1,87 @@
|
||||
//go:build !android && !ios && !freebsd && !js
|
||||
|
||||
package services
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/netbirdio/netbird/client/proto"
|
||||
)
|
||||
|
||||
// PortRange describes a contiguous port range. Both ends are inclusive.
|
||||
type PortRange struct {
|
||||
Start uint32 `json:"start"`
|
||||
End uint32 `json:"end"`
|
||||
}
|
||||
|
||||
// PortInfo carries the destination or translated port for a forwarding rule.
|
||||
// Exactly one of Port or Range is populated, mirroring the daemon's oneof.
|
||||
type PortInfo struct {
|
||||
Port *uint32 `json:"port,omitempty"`
|
||||
Range *PortRange `json:"range,omitempty"`
|
||||
}
|
||||
|
||||
// ForwardingRule is one entry from the daemon's reverse-proxy table —
|
||||
// what we ship to the frontend's "exposed services" view.
|
||||
type ForwardingRule struct {
|
||||
Protocol string `json:"protocol"`
|
||||
DestinationPort PortInfo `json:"destinationPort"`
|
||||
TranslatedAddress string `json:"translatedAddress"`
|
||||
TranslatedHostname string `json:"translatedHostname"`
|
||||
TranslatedPort PortInfo `json:"translatedPort"`
|
||||
}
|
||||
|
||||
// Forwarding groups the daemon RPCs that surface exposed/forwarded services.
|
||||
type Forwarding struct {
|
||||
conn DaemonConn
|
||||
}
|
||||
|
||||
func NewForwarding(conn DaemonConn) *Forwarding {
|
||||
return &Forwarding{conn: conn}
|
||||
}
|
||||
|
||||
// List returns the current set of forwarding rules from the daemon's
|
||||
// reverse proxy. The frontend renders these as the "exposed services" list.
|
||||
func (s *Forwarding) List(ctx context.Context) ([]ForwardingRule, error) {
|
||||
cli, err := s.conn.Client()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := cli.ForwardingRules(ctx, &proto.EmptyRequest{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out := make([]ForwardingRule, 0, len(resp.GetRules()))
|
||||
for _, r := range resp.GetRules() {
|
||||
out = append(out, forwardingRuleFromProto(r))
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func forwardingRuleFromProto(r *proto.ForwardingRule) ForwardingRule {
|
||||
return ForwardingRule{
|
||||
Protocol: r.GetProtocol(),
|
||||
DestinationPort: portInfoFromProto(r.GetDestinationPort()),
|
||||
TranslatedAddress: r.GetTranslatedAddress(),
|
||||
TranslatedHostname: r.GetTranslatedHostname(),
|
||||
TranslatedPort: portInfoFromProto(r.GetTranslatedPort()),
|
||||
}
|
||||
}
|
||||
|
||||
func portInfoFromProto(p *proto.PortInfo) PortInfo {
|
||||
if p == nil {
|
||||
return PortInfo{}
|
||||
}
|
||||
switch sel := p.GetPortSelection().(type) {
|
||||
case *proto.PortInfo_Port:
|
||||
port := sel.Port
|
||||
return PortInfo{Port: &port}
|
||||
case *proto.PortInfo_Range_:
|
||||
r := sel.Range
|
||||
if r == nil {
|
||||
return PortInfo{}
|
||||
}
|
||||
return PortInfo{Range: &PortRange{Start: r.GetStart(), End: r.GetEnd()}}
|
||||
}
|
||||
return PortInfo{}
|
||||
}
|
||||
92
client/ui/services/network.go
Normal file
92
client/ui/services/network.go
Normal file
@@ -0,0 +1,92 @@
|
||||
//go:build !android && !ios && !freebsd && !js
|
||||
|
||||
package services
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/netbirdio/netbird/client/proto"
|
||||
)
|
||||
|
||||
// Network is one routed network the daemon offers to the client.
|
||||
type Network struct {
|
||||
ID string `json:"id"`
|
||||
Range string `json:"range"`
|
||||
Selected bool `json:"selected"`
|
||||
Domains []string `json:"domains"`
|
||||
ResolvedIPs map[string][]string `json:"resolvedIps"`
|
||||
}
|
||||
|
||||
// SelectNetworksParams selects which networks to enable / disable.
|
||||
// All means "every available network" (used by Select-All / Deselect-All buttons);
|
||||
// Append means "leave the existing selection in place and merge these IDs in".
|
||||
type SelectNetworksParams struct {
|
||||
NetworkIDs []string `json:"networkIds"`
|
||||
Append bool `json:"append"`
|
||||
All bool `json:"all"`
|
||||
}
|
||||
|
||||
// Networks groups the daemon RPCs that read and toggle routed networks.
|
||||
type Networks struct {
|
||||
conn DaemonConn
|
||||
}
|
||||
|
||||
func NewNetworks(conn DaemonConn) *Networks {
|
||||
return &Networks{conn: conn}
|
||||
}
|
||||
|
||||
func (s *Networks) List(ctx context.Context) ([]Network, error) {
|
||||
cli, err := s.conn.Client()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := cli.ListNetworks(ctx, &proto.ListNetworksRequest{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out := make([]Network, 0, len(resp.GetRoutes()))
|
||||
for _, n := range resp.GetRoutes() {
|
||||
out = append(out, networkFromProto(n))
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (s *Networks) Select(ctx context.Context, p SelectNetworksParams) error {
|
||||
cli, err := s.conn.Client()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = cli.SelectNetworks(ctx, &proto.SelectNetworksRequest{
|
||||
NetworkIDs: p.NetworkIDs,
|
||||
Append: p.Append,
|
||||
All: p.All,
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *Networks) Deselect(ctx context.Context, p SelectNetworksParams) error {
|
||||
cli, err := s.conn.Client()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = cli.DeselectNetworks(ctx, &proto.SelectNetworksRequest{
|
||||
NetworkIDs: p.NetworkIDs,
|
||||
Append: p.Append,
|
||||
All: p.All,
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func networkFromProto(n *proto.Network) Network {
|
||||
resolved := make(map[string][]string, len(n.GetResolvedIPs()))
|
||||
for k, v := range n.GetResolvedIPs() {
|
||||
resolved[k] = append([]string{}, v.GetIps()...)
|
||||
}
|
||||
return Network{
|
||||
ID: n.GetID(),
|
||||
Range: n.GetRange(),
|
||||
Selected: n.GetSelected(),
|
||||
Domains: append([]string{}, n.GetDomains()...),
|
||||
ResolvedIPs: resolved,
|
||||
}
|
||||
}
|
||||
369
client/ui/services/peers.go
Normal file
369
client/ui/services/peers.go
Normal file
@@ -0,0 +1,369 @@
|
||||
//go:build !android && !ios && !freebsd && !js
|
||||
|
||||
package services
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/cenkalti/backoff/v4"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/netbirdio/netbird/client/proto"
|
||||
)
|
||||
|
||||
// Names of the Wails events this package emits to the frontend.
const (
	// EventStatus is emitted to the frontend whenever a fresh Status
	// snapshot is captured (from a poll or a stream-driven refresh).
	EventStatus = "netbird:status"

	// EventSystem is emitted for each SubscribeEvents message (DNS,
	// network, auth, connectivity categories).
	EventSystem = "netbird:event"

	// EventUpdateAvailable fires when the daemon detects a new version.
	// The metadata's enforced flag is propagated as part of the payload.
	EventUpdateAvailable = "netbird:update:available"

	// EventUpdateProgress fires when the daemon is about to start (or has
	// started) installing an update — Mode 2 enforced flow. The UI opens
	// the progress window in response.
	EventUpdateProgress = "netbird:update:progress"
)
|
||||
|
||||
// Emitter is what peers.Watch needs from the host application: a simple
|
||||
// "send this name and payload to the frontend" hook. The Wails app.Event
|
||||
// satisfies this with its Emit method.
|
||||
type Emitter interface {
|
||||
Emit(name string, data ...any) bool
|
||||
}
|
||||
|
||||
// UpdateAvailable carries the new_version_available metadata.
|
||||
type UpdateAvailable struct {
|
||||
Version string `json:"version"`
|
||||
Enforced bool `json:"enforced"`
|
||||
}
|
||||
|
||||
// UpdateProgress carries the progress_window metadata.
|
||||
type UpdateProgress struct {
|
||||
Action string `json:"action"`
|
||||
Version string `json:"version"`
|
||||
}
|
||||
|
||||
// SystemEvent is the frontend-facing shape of a daemon SystemEvent.
|
||||
type SystemEvent struct {
|
||||
ID string `json:"id"`
|
||||
Severity string `json:"severity"`
|
||||
Category string `json:"category"`
|
||||
Message string `json:"message"`
|
||||
UserMessage string `json:"userMessage"`
|
||||
Timestamp int64 `json:"timestamp"`
|
||||
Metadata map[string]string `json:"metadata"`
|
||||
}
|
||||
|
||||
// PeerStatus is the frontend-facing shape of a daemon PeerState. Carries
|
||||
// enough detail for the dashboard's compact peer row plus the on-click
|
||||
// troubleshooting expansion (ICE candidate types, endpoints, handshake age).
|
||||
type PeerStatus struct {
|
||||
IP string `json:"ip"`
|
||||
PubKey string `json:"pubKey"`
|
||||
ConnStatus string `json:"connStatus"`
|
||||
ConnStatusUpdateUnix int64 `json:"connStatusUpdateUnix"`
|
||||
Relayed bool `json:"relayed"`
|
||||
LocalIceCandidateType string `json:"localIceCandidateType"`
|
||||
RemoteIceCandidateType string `json:"remoteIceCandidateType"`
|
||||
LocalIceCandidateEndpoint string `json:"localIceCandidateEndpoint"`
|
||||
RemoteIceCandidateEndpoint string `json:"remoteIceCandidateEndpoint"`
|
||||
Fqdn string `json:"fqdn"`
|
||||
BytesRx int64 `json:"bytesRx"`
|
||||
BytesTx int64 `json:"bytesTx"`
|
||||
LatencyMs int64 `json:"latencyMs"`
|
||||
RelayAddress string `json:"relayAddress"`
|
||||
LastHandshakeUnix int64 `json:"lastHandshakeUnix"`
|
||||
RosenpassEnabled bool `json:"rosenpassEnabled"`
|
||||
Networks []string `json:"networks"`
|
||||
}
|
||||
|
||||
// PeerLink is one of the named connections between this peer and its mgmt
|
||||
// or signal server.
|
||||
type PeerLink struct {
|
||||
URL string `json:"url"`
|
||||
Connected bool `json:"connected"`
|
||||
Error string `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
// LocalPeer mirrors LocalPeerState — what this client looks like on the mesh.
|
||||
type LocalPeer struct {
|
||||
IP string `json:"ip"`
|
||||
PubKey string `json:"pubKey"`
|
||||
Fqdn string `json:"fqdn"`
|
||||
Networks []string `json:"networks"`
|
||||
}
|
||||
|
||||
// Status is the snapshot the frontend renders on the dashboard.
|
||||
type Status struct {
|
||||
Status string `json:"status"`
|
||||
DaemonVersion string `json:"daemonVersion"`
|
||||
Management PeerLink `json:"management"`
|
||||
Signal PeerLink `json:"signal"`
|
||||
Local LocalPeer `json:"local"`
|
||||
Peers []PeerStatus `json:"peers"`
|
||||
Events []SystemEvent `json:"events"`
|
||||
}
|
||||
|
||||
// Peers serves the dashboard data: one polled Status RPC and a long-running
|
||||
// SubscribeEvents stream that re-emits every event over the Wails event bus.
|
||||
type Peers struct {
|
||||
conn DaemonConn
|
||||
emitter Emitter
|
||||
|
||||
mu sync.Mutex
|
||||
cancel context.CancelFunc
|
||||
streamWg sync.WaitGroup
|
||||
}
|
||||
|
||||
func NewPeers(conn DaemonConn, emitter Emitter) *Peers {
|
||||
return &Peers{conn: conn, emitter: emitter}
|
||||
}
|
||||
|
||||
// Watch starts the background loops that feed the frontend:
|
||||
// - statusStreamLoop: push-driven snapshots on connection-state change
|
||||
// (Connected/Disconnected/Connecting, peer list, address). Drives the
|
||||
// tray icon, Status page, and Peers page.
|
||||
// - toastStreamLoop: DNS / network / auth / connectivity / update
|
||||
// SystemEvent stream. Drives OS notifications, the Recent Events
|
||||
// list, and the update-overlay flag. The daemon-side RPC is named
|
||||
// SubscribeEvents — only the loop's local alias differs to keep the
|
||||
// two streams distinguishable in this file.
|
||||
//
|
||||
// Safe to call once at boot; both loops self-restart on stream errors
|
||||
// via exponential backoff.
|
||||
func (s *Peers) Watch(ctx context.Context) {
|
||||
s.mu.Lock()
|
||||
if s.cancel != nil {
|
||||
s.mu.Unlock()
|
||||
return
|
||||
}
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
s.cancel = cancel
|
||||
s.mu.Unlock()
|
||||
|
||||
s.streamWg.Add(2)
|
||||
go s.statusStreamLoop(ctx)
|
||||
go s.toastStreamLoop(ctx)
|
||||
}
|
||||
|
||||
// ServiceShutdown is the Wails service hook fired on app exit.
|
||||
func (s *Peers) ServiceShutdown() error {
|
||||
s.mu.Lock()
|
||||
cancel := s.cancel
|
||||
s.cancel = nil
|
||||
s.mu.Unlock()
|
||||
if cancel != nil {
|
||||
cancel()
|
||||
}
|
||||
s.streamWg.Wait()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get returns the current daemon status snapshot.
|
||||
func (s *Peers) Get(ctx context.Context) (Status, error) {
|
||||
cli, err := s.conn.Client()
|
||||
if err != nil {
|
||||
return Status{}, err
|
||||
}
|
||||
resp, err := cli.Status(ctx, &proto.StatusRequest{GetFullPeerStatus: true})
|
||||
if err != nil {
|
||||
return Status{}, err
|
||||
}
|
||||
return statusFromProto(resp), nil
|
||||
}
|
||||
|
||||
// statusStreamLoop subscribes to the daemon's SubscribeStatus stream and
|
||||
// re-emits each FullStatus snapshot on the Wails event bus. The first
|
||||
// message is the current snapshot; subsequent messages fire on
|
||||
// connection-state changes only — no fixed-interval polling, no idle
|
||||
// chatter. Reconnects with exponential backoff if the stream drops
|
||||
// (daemon restart, socket break).
|
||||
func (s *Peers) statusStreamLoop(ctx context.Context) {
|
||||
defer s.streamWg.Done()
|
||||
|
||||
bo := backoff.WithContext(&backoff.ExponentialBackOff{
|
||||
InitialInterval: time.Second,
|
||||
RandomizationFactor: backoff.DefaultRandomizationFactor,
|
||||
Multiplier: backoff.DefaultMultiplier,
|
||||
MaxInterval: 10 * time.Second,
|
||||
MaxElapsedTime: 0,
|
||||
Stop: backoff.Stop,
|
||||
Clock: backoff.SystemClock,
|
||||
}, ctx)
|
||||
|
||||
op := func() error {
|
||||
cli, err := s.conn.Client()
|
||||
if err != nil {
|
||||
return fmt.Errorf("get client: %w", err)
|
||||
}
|
||||
stream, err := cli.SubscribeStatus(ctx, &proto.StatusRequest{GetFullPeerStatus: true})
|
||||
if err != nil {
|
||||
return fmt.Errorf("subscribe status: %w", err)
|
||||
}
|
||||
for {
|
||||
resp, err := stream.Recv()
|
||||
if err != nil {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
return fmt.Errorf("status stream recv: %w", err)
|
||||
}
|
||||
st := statusFromProto(resp)
|
||||
log.Infof("backend event: status status=%q peers=%d", st.Status, len(st.Peers))
|
||||
s.emitter.Emit(EventStatus, st)
|
||||
}
|
||||
}
|
||||
|
||||
if err := backoff.Retry(op, bo); err != nil && ctx.Err() == nil {
|
||||
log.Errorf("status stream ended: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// toastStreamLoop subscribes to the daemon's SubscribeEvents RPC and
|
||||
// re-emits every SystemEvent on the Wails event bus. The downstream
|
||||
// consumers turn these into OS notifications, populate the Recent
|
||||
// Events card on the Status page, and listen for the
|
||||
// "new_version_available" metadata to flip the tray's update overlay.
|
||||
// Local name differs from the RPC ("SubscribeEvents") so the file's
|
||||
// two streams aren't both called streamLoop.
|
||||
func (s *Peers) toastStreamLoop(ctx context.Context) {
|
||||
defer s.streamWg.Done()
|
||||
|
||||
bo := backoff.WithContext(&backoff.ExponentialBackOff{
|
||||
InitialInterval: time.Second,
|
||||
RandomizationFactor: backoff.DefaultRandomizationFactor,
|
||||
Multiplier: backoff.DefaultMultiplier,
|
||||
MaxInterval: 10 * time.Second,
|
||||
MaxElapsedTime: 0,
|
||||
Stop: backoff.Stop,
|
||||
Clock: backoff.SystemClock,
|
||||
}, ctx)
|
||||
|
||||
op := func() error {
|
||||
cli, err := s.conn.Client()
|
||||
if err != nil {
|
||||
return fmt.Errorf("get client: %w", err)
|
||||
}
|
||||
stream, err := cli.SubscribeEvents(ctx, &proto.SubscribeRequest{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("subscribe: %w", err)
|
||||
}
|
||||
for {
|
||||
ev, err := stream.Recv()
|
||||
if err != nil {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
return fmt.Errorf("stream recv: %w", err)
|
||||
}
|
||||
se := systemEventFromProto(ev)
|
||||
log.Infof("backend event: system severity=%s category=%s msg=%q", se.Severity, se.Category, se.UserMessage)
|
||||
s.emitter.Emit(EventSystem, se)
|
||||
s.fanOutUpdateEvents(ev)
|
||||
}
|
||||
}
|
||||
|
||||
if err := backoff.Retry(op, bo); err != nil && ctx.Err() == nil {
|
||||
log.Errorf("event stream ended: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func statusFromProto(resp *proto.StatusResponse) Status {
|
||||
full := resp.GetFullStatus()
|
||||
mgmt := full.GetManagementState()
|
||||
sig := full.GetSignalState()
|
||||
local := full.GetLocalPeerState()
|
||||
|
||||
st := Status{
|
||||
Status: resp.GetStatus(),
|
||||
DaemonVersion: resp.GetDaemonVersion(),
|
||||
Management: PeerLink{
|
||||
URL: mgmt.GetURL(),
|
||||
Connected: mgmt.GetConnected(),
|
||||
Error: mgmt.GetError(),
|
||||
},
|
||||
Signal: PeerLink{
|
||||
URL: sig.GetURL(),
|
||||
Connected: sig.GetConnected(),
|
||||
Error: sig.GetError(),
|
||||
},
|
||||
Local: LocalPeer{
|
||||
IP: local.GetIP(),
|
||||
PubKey: local.GetPubKey(),
|
||||
Fqdn: local.GetFqdn(),
|
||||
Networks: append([]string{}, local.GetNetworks()...),
|
||||
},
|
||||
}
|
||||
|
||||
for _, p := range full.GetPeers() {
|
||||
st.Peers = append(st.Peers, PeerStatus{
|
||||
IP: p.GetIP(),
|
||||
PubKey: p.GetPubKey(),
|
||||
ConnStatus: p.GetConnStatus(),
|
||||
ConnStatusUpdateUnix: p.GetConnStatusUpdate().GetSeconds(),
|
||||
Relayed: p.GetRelayed(),
|
||||
LocalIceCandidateType: p.GetLocalIceCandidateType(),
|
||||
RemoteIceCandidateType: p.GetRemoteIceCandidateType(),
|
||||
LocalIceCandidateEndpoint: p.GetLocalIceCandidateEndpoint(),
|
||||
RemoteIceCandidateEndpoint: p.GetRemoteIceCandidateEndpoint(),
|
||||
Fqdn: p.GetFqdn(),
|
||||
BytesRx: p.GetBytesRx(),
|
||||
BytesTx: p.GetBytesTx(),
|
||||
LatencyMs: p.GetLatency().AsDuration().Milliseconds(),
|
||||
RelayAddress: p.GetRelayAddress(),
|
||||
LastHandshakeUnix: p.GetLastWireguardHandshake().GetSeconds(),
|
||||
RosenpassEnabled: p.GetRosenpassEnabled(),
|
||||
Networks: append([]string{}, p.GetNetworks()...),
|
||||
})
|
||||
}
|
||||
for _, e := range full.GetEvents() {
|
||||
st.Events = append(st.Events, systemEventFromProto(e))
|
||||
}
|
||||
return st
|
||||
}
|
||||
|
||||
// fanOutUpdateEvents inspects the daemon SystemEvent for update-related
|
||||
// metadata keys and re-emits them as dedicated Wails events. This lets the
|
||||
// tray and React update window listen for a single, narrow event instead of
|
||||
// re-checking metadata on every system event they receive.
|
||||
func (s *Peers) fanOutUpdateEvents(ev *proto.SystemEvent) {
|
||||
md := ev.GetMetadata()
|
||||
if md == nil {
|
||||
return
|
||||
}
|
||||
if v, ok := md["new_version_available"]; ok {
|
||||
_, enforced := md["enforced"]
|
||||
s.emitter.Emit(EventUpdateAvailable, UpdateAvailable{Version: v, Enforced: enforced})
|
||||
}
|
||||
if action, ok := md["progress_window"]; ok {
|
||||
s.emitter.Emit(EventUpdateProgress, UpdateProgress{
|
||||
Action: action,
|
||||
Version: md["version"],
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func systemEventFromProto(e *proto.SystemEvent) SystemEvent {
|
||||
out := SystemEvent{
|
||||
ID: e.GetId(),
|
||||
Severity: strings.ToLower(strings.TrimPrefix(e.GetSeverity().String(), "SystemEvent_")),
|
||||
Category: strings.ToLower(strings.TrimPrefix(e.GetCategory().String(), "SystemEvent_")),
|
||||
Message: e.GetMessage(),
|
||||
UserMessage: e.GetUserMessage(),
|
||||
Metadata: map[string]string{},
|
||||
}
|
||||
if ts := e.GetTimestamp(); ts != nil {
|
||||
out.Timestamp = ts.GetSeconds()
|
||||
}
|
||||
for k, v := range e.GetMetadata() {
|
||||
out.Metadata[k] = v
|
||||
}
|
||||
return out
|
||||
}
|
||||
118
client/ui/services/profile.go
Normal file
118
client/ui/services/profile.go
Normal file
@@ -0,0 +1,118 @@
|
||||
//go:build !android && !ios && !freebsd && !js
|
||||
|
||||
package services
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os/user"
|
||||
|
||||
"github.com/netbirdio/netbird/client/proto"
|
||||
)
|
||||
|
||||
// Profile is one named daemon profile.
|
||||
type Profile struct {
|
||||
Name string `json:"name"`
|
||||
IsActive bool `json:"isActive"`
|
||||
}
|
||||
|
||||
// ProfileRef identifies a profile by name+username.
|
||||
type ProfileRef struct {
|
||||
ProfileName string `json:"profileName"`
|
||||
Username string `json:"username"`
|
||||
}
|
||||
|
||||
// ActiveProfile is the result of GetActiveProfile.
|
||||
type ActiveProfile struct {
|
||||
ProfileName string `json:"profileName"`
|
||||
Username string `json:"username"`
|
||||
}
|
||||
|
||||
// Profiles groups the daemon RPCs that manage named profiles.
|
||||
type Profiles struct {
|
||||
conn DaemonConn
|
||||
}
|
||||
|
||||
func NewProfiles(conn DaemonConn) *Profiles {
|
||||
return &Profiles{conn: conn}
|
||||
}
|
||||
|
||||
// Username returns the OS username the daemon expects for profile lookups.
|
||||
// The frontend calls this once at boot and reuses the result.
|
||||
func (s *Profiles) Username() (string, error) {
|
||||
u, err := user.Current()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return u.Username, nil
|
||||
}
|
||||
|
||||
func (s *Profiles) List(ctx context.Context, username string) ([]Profile, error) {
|
||||
cli, err := s.conn.Client()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := cli.ListProfiles(ctx, &proto.ListProfilesRequest{Username: username})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out := make([]Profile, 0, len(resp.GetProfiles()))
|
||||
for _, p := range resp.GetProfiles() {
|
||||
out = append(out, Profile{Name: p.GetName(), IsActive: p.GetIsActive()})
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (s *Profiles) GetActive(ctx context.Context) (ActiveProfile, error) {
|
||||
cli, err := s.conn.Client()
|
||||
if err != nil {
|
||||
return ActiveProfile{}, err
|
||||
}
|
||||
resp, err := cli.GetActiveProfile(ctx, &proto.GetActiveProfileRequest{})
|
||||
if err != nil {
|
||||
return ActiveProfile{}, err
|
||||
}
|
||||
return ActiveProfile{
|
||||
ProfileName: resp.GetProfileName(),
|
||||
Username: resp.GetUsername(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *Profiles) Switch(ctx context.Context, p ProfileRef) error {
|
||||
cli, err := s.conn.Client()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req := &proto.SwitchProfileRequest{}
|
||||
if p.ProfileName != "" {
|
||||
req.ProfileName = ptrStr(p.ProfileName)
|
||||
}
|
||||
if p.Username != "" {
|
||||
req.Username = ptrStr(p.Username)
|
||||
}
|
||||
_, err = cli.SwitchProfile(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *Profiles) Add(ctx context.Context, p ProfileRef) error {
|
||||
cli, err := s.conn.Client()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = cli.AddProfile(ctx, &proto.AddProfileRequest{
|
||||
ProfileName: p.ProfileName,
|
||||
Username: p.Username,
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *Profiles) Remove(ctx context.Context, p ProfileRef) error {
|
||||
cli, err := s.conn.Client()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = cli.RemoveProfile(ctx, &proto.RemoveProfileRequest{
|
||||
ProfileName: p.ProfileName,
|
||||
Username: p.Username,
|
||||
})
|
||||
return err
|
||||
}
|
||||
192
client/ui/services/settings.go
Normal file
192
client/ui/services/settings.go
Normal file
@@ -0,0 +1,192 @@
|
||||
//go:build !android && !ios && !freebsd && !js
|
||||
|
||||
package services
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/netbirdio/netbird/client/proto"
|
||||
)
|
||||
|
||||
// ConfigParams selects which profile/user to read or write config for.
type ConfigParams struct {
	ProfileName string `json:"profileName"`
	Username    string `json:"username"`
}
|
||||
|
||||
// Config is the daemon configuration snapshot the UI exposes in the settings
// window, populated from GetConfigResponse. All fields are plain values; for
// partial updates with set-vs-unset (pointer) semantics see SetConfigParams.
type Config struct {
	ManagementURL                 string `json:"managementUrl"`
	AdminURL                      string `json:"adminUrl"`
	ConfigFile                    string `json:"configFile"`
	LogFile                       string `json:"logFile"`
	PreSharedKey                  string `json:"preSharedKey"`
	InterfaceName                 string `json:"interfaceName"`
	WireguardPort                 int64  `json:"wireguardPort"`
	MTU                           int64  `json:"mtu"`
	DisableAutoConnect            bool   `json:"disableAutoConnect"`
	ServerSSHAllowed              bool   `json:"serverSshAllowed"`
	RosenpassEnabled              bool   `json:"rosenpassEnabled"`
	RosenpassPermissive           bool   `json:"rosenpassPermissive"`
	DisableNotifications          bool   `json:"disableNotifications"`
	LazyConnectionEnabled         bool   `json:"lazyConnectionEnabled"`
	BlockInbound                  bool   `json:"blockInbound"`
	NetworkMonitor                bool   `json:"networkMonitor"`
	DisableClientRoutes           bool   `json:"disableClientRoutes"`
	DisableServerRoutes           bool   `json:"disableServerRoutes"`
	DisableDNS                    bool   `json:"disableDns"`
	BlockLANAccess                bool   `json:"blockLanAccess"`
	EnableSSHRoot                 bool   `json:"enableSshRoot"`
	EnableSSHSFTP                 bool   `json:"enableSshSftp"`
	EnableSSHLocalPortForwarding  bool   `json:"enableSshLocalPortForwarding"`
	EnableSSHRemotePortForwarding bool   `json:"enableSshRemotePortForwarding"`
	DisableSSHAuth                bool   `json:"disableSshAuth"`
	SSHJWTCacheTTL                int32  `json:"sshJwtCacheTtl"`
}
|
||||
|
||||
// SetConfigParams is a partial update — only fields with non-nil pointers
// are sent to the daemon, matching SetConfigRequest's optional semantics.
// The frontend uses this to flip individual toggles without clobbering the
// rest of the config.
type SetConfigParams struct {
	ProfileName                   string  `json:"profileName"`
	Username                      string  `json:"username"`
	ManagementURL                 string  `json:"managementUrl"`
	AdminURL                      string  `json:"adminUrl"`
	InterfaceName                 *string `json:"interfaceName,omitempty"`
	WireguardPort                 *int64  `json:"wireguardPort,omitempty"`
	MTU                           *int64  `json:"mtu,omitempty"`
	PreSharedKey                  *string `json:"preSharedKey,omitempty"`
	DisableAutoConnect            *bool   `json:"disableAutoConnect,omitempty"`
	ServerSSHAllowed              *bool   `json:"serverSshAllowed,omitempty"`
	RosenpassEnabled              *bool   `json:"rosenpassEnabled,omitempty"`
	RosenpassPermissive           *bool   `json:"rosenpassPermissive,omitempty"`
	DisableNotifications          *bool   `json:"disableNotifications,omitempty"`
	LazyConnectionEnabled         *bool   `json:"lazyConnectionEnabled,omitempty"`
	BlockInbound                  *bool   `json:"blockInbound,omitempty"`
	NetworkMonitor                *bool   `json:"networkMonitor,omitempty"`
	DisableClientRoutes           *bool   `json:"disableClientRoutes,omitempty"`
	DisableServerRoutes           *bool   `json:"disableServerRoutes,omitempty"`
	DisableDNS                    *bool   `json:"disableDns,omitempty"`
	DisableFirewall               *bool   `json:"disableFirewall,omitempty"`
	BlockLANAccess                *bool   `json:"blockLanAccess,omitempty"`
	EnableSSHRoot                 *bool   `json:"enableSshRoot,omitempty"`
	EnableSSHSFTP                 *bool   `json:"enableSshSftp,omitempty"`
	EnableSSHLocalPortForwarding  *bool   `json:"enableSshLocalPortForwarding,omitempty"`
	EnableSSHRemotePortForwarding *bool   `json:"enableSshRemotePortForwarding,omitempty"`
	DisableSSHAuth                *bool   `json:"disableSshAuth,omitempty"`
	SSHJWTCacheTTL                *int32  `json:"sshJwtCacheTtl,omitempty"`
}
|
||||
|
||||
// Features reports which UI surfaces the daemon has disabled. The UI uses
// these flags to grey out menu items the operator turned off server-side.
type Features struct {
	DisableProfiles       bool `json:"disableProfiles"`
	DisableUpdateSettings bool `json:"disableUpdateSettings"`
	DisableNetworks       bool `json:"disableNetworks"`
}
|
||||
|
||||
// Settings groups the daemon RPCs that read and write the daemon config.
type Settings struct {
	// conn supplies the gRPC client on demand; shared with the other services.
	conn DaemonConn
}
|
||||
|
||||
// NewSettings returns a Settings service backed by the given daemon connection.
func NewSettings(conn DaemonConn) *Settings {
	return &Settings{conn: conn}
}
|
||||
|
||||
// GetConfig reads the full daemon configuration for the given profile/user
// and maps every GetConfigResponse field onto the UI-facing Config struct.
// The mapping is field-for-field; keep it in sync with Config and the proto.
func (s *Settings) GetConfig(ctx context.Context, p ConfigParams) (Config, error) {
	cli, err := s.conn.Client()
	if err != nil {
		return Config{}, err
	}
	resp, err := cli.GetConfig(ctx, &proto.GetConfigRequest{
		ProfileName: p.ProfileName,
		Username:    p.Username,
	})
	if err != nil {
		return Config{}, err
	}
	return Config{
		ManagementURL:                 resp.GetManagementUrl(),
		AdminURL:                      resp.GetAdminURL(),
		ConfigFile:                    resp.GetConfigFile(),
		LogFile:                       resp.GetLogFile(),
		PreSharedKey:                  resp.GetPreSharedKey(),
		InterfaceName:                 resp.GetInterfaceName(),
		WireguardPort:                 resp.GetWireguardPort(),
		MTU:                           resp.GetMtu(),
		DisableAutoConnect:            resp.GetDisableAutoConnect(),
		ServerSSHAllowed:              resp.GetServerSSHAllowed(),
		RosenpassEnabled:              resp.GetRosenpassEnabled(),
		RosenpassPermissive:           resp.GetRosenpassPermissive(),
		DisableNotifications:          resp.GetDisableNotifications(),
		LazyConnectionEnabled:         resp.GetLazyConnectionEnabled(),
		BlockInbound:                  resp.GetBlockInbound(),
		NetworkMonitor:                resp.GetNetworkMonitor(),
		DisableClientRoutes:           resp.GetDisableClientRoutes(),
		DisableServerRoutes:           resp.GetDisableServerRoutes(),
		DisableDNS:                    resp.GetDisableDns(),
		BlockLANAccess:                resp.GetBlockLanAccess(),
		EnableSSHRoot:                 resp.GetEnableSSHRoot(),
		EnableSSHSFTP:                 resp.GetEnableSSHSFTP(),
		EnableSSHLocalPortForwarding:  resp.GetEnableSSHLocalPortForwarding(),
		EnableSSHRemotePortForwarding: resp.GetEnableSSHRemotePortForwarding(),
		DisableSSHAuth:                resp.GetDisableSSHAuth(),
		SSHJWTCacheTTL:                resp.GetSshJWTCacheTTL(),
	}, nil
}
|
||||
|
||||
// SetConfig writes a partial configuration update to the daemon. Pointer
// fields left nil in p are omitted from the request, so the daemon keeps its
// current value for those settings (SetConfigRequest optional semantics).
// Keep the field mapping in sync with SetConfigParams and the proto.
func (s *Settings) SetConfig(ctx context.Context, p SetConfigParams) error {
	cli, err := s.conn.Client()
	if err != nil {
		return err
	}
	req := &proto.SetConfigRequest{
		ProfileName:                   p.ProfileName,
		Username:                      p.Username,
		ManagementUrl:                 p.ManagementURL,
		AdminURL:                      p.AdminURL,
		InterfaceName:                 p.InterfaceName,
		WireguardPort:                 p.WireguardPort,
		Mtu:                           p.MTU,
		OptionalPreSharedKey:          p.PreSharedKey,
		DisableAutoConnect:            p.DisableAutoConnect,
		ServerSSHAllowed:              p.ServerSSHAllowed,
		RosenpassEnabled:              p.RosenpassEnabled,
		RosenpassPermissive:           p.RosenpassPermissive,
		DisableNotifications:          p.DisableNotifications,
		LazyConnectionEnabled:         p.LazyConnectionEnabled,
		BlockInbound:                  p.BlockInbound,
		NetworkMonitor:                p.NetworkMonitor,
		DisableClientRoutes:           p.DisableClientRoutes,
		DisableServerRoutes:           p.DisableServerRoutes,
		DisableDns:                    p.DisableDNS,
		DisableFirewall:               p.DisableFirewall,
		BlockLanAccess:                p.BlockLANAccess,
		EnableSSHRoot:                 p.EnableSSHRoot,
		EnableSSHSFTP:                 p.EnableSSHSFTP,
		EnableSSHLocalPortForwarding:  p.EnableSSHLocalPortForwarding,
		EnableSSHRemotePortForwarding: p.EnableSSHRemotePortForwarding,
		DisableSSHAuth:                p.DisableSSHAuth,
		SshJWTCacheTTL:                p.SSHJWTCacheTTL,
	}
	_, err = cli.SetConfig(ctx, req)
	return err
}
|
||||
|
||||
func (s *Settings) GetFeatures(ctx context.Context) (Features, error) {
|
||||
cli, err := s.conn.Client()
|
||||
if err != nil {
|
||||
return Features{}, err
|
||||
}
|
||||
resp, err := cli.GetFeatures(ctx, &proto.GetFeaturesRequest{})
|
||||
if err != nil {
|
||||
return Features{}, err
|
||||
}
|
||||
return Features{
|
||||
DisableProfiles: resp.GetDisableProfiles(),
|
||||
DisableUpdateSettings: resp.GetDisableUpdateSettings(),
|
||||
DisableNetworks: resp.GetDisableNetworks(),
|
||||
}, nil
|
||||
}
|
||||
70
client/ui/services/update.go
Normal file
70
client/ui/services/update.go
Normal file
@@ -0,0 +1,70 @@
|
||||
//go:build !android && !ios && !freebsd && !js
|
||||
|
||||
package services
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/wailsapp/wails/v3/pkg/application"
|
||||
|
||||
"github.com/netbirdio/netbird/client/proto"
|
||||
)
|
||||
|
||||
// UpdateResult mirrors TriggerUpdateResponse: when Success is false,
// ErrorMsg carries the daemon-side error message.
type UpdateResult struct {
	Success  bool   `json:"success"`
	ErrorMsg string `json:"errorMsg"`
}
|
||||
|
||||
// Update groups the RPCs that drive the enforced-update install flow.
type Update struct {
	// conn supplies the gRPC client on demand; shared with the other services.
	conn DaemonConn
}
|
||||
|
||||
// NewUpdate returns an Update service backed by the given daemon connection.
func NewUpdate(conn DaemonConn) *Update {
	return &Update{conn: conn}
}
|
||||
|
||||
// Quit asks the host application to exit. The /update page calls this once
|
||||
// the daemon-side installer has reported success, mirroring the legacy
|
||||
// Fyne UI's app.Quit() in showInstallerResult. Schedules the actual exit
|
||||
// off the calling goroutine so the JS-side caller's response can return
|
||||
// before the runtime tears down.
|
||||
func (s *Update) Quit() {
|
||||
go func() {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
application.Get().Quit()
|
||||
}()
|
||||
}
|
||||
|
||||
func (s *Update) Trigger(ctx context.Context) (UpdateResult, error) {
|
||||
cli, err := s.conn.Client()
|
||||
if err != nil {
|
||||
return UpdateResult{}, err
|
||||
}
|
||||
resp, err := cli.TriggerUpdate(ctx, &proto.TriggerUpdateRequest{})
|
||||
if err != nil {
|
||||
return UpdateResult{}, err
|
||||
}
|
||||
return UpdateResult{
|
||||
Success: resp.GetSuccess(),
|
||||
ErrorMsg: resp.GetErrorMsg(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *Update) GetInstallerResult(ctx context.Context) (UpdateResult, error) {
|
||||
cli, err := s.conn.Client()
|
||||
if err != nil {
|
||||
return UpdateResult{}, err
|
||||
}
|
||||
resp, err := cli.GetInstallerResult(ctx, &proto.InstallerResultRequest{})
|
||||
if err != nil {
|
||||
return UpdateResult{}, err
|
||||
}
|
||||
return UpdateResult{
|
||||
Success: resp.GetSuccess(),
|
||||
ErrorMsg: resp.GetErrorMsg(),
|
||||
}, nil
|
||||
}
|
||||
Reference in New Issue
Block a user