Merge remote-tracking branch 'origin/main' into refactor/permissions-manager

# Conflicts:
#	management/internals/modules/reverseproxy/domain/manager/manager.go
#	management/internals/modules/reverseproxy/service/manager/api.go
#	management/internals/server/modules.go
#	management/server/http/testing/testing_tools/channel/channel.go
This commit is contained in:
pascal
2026-03-17 12:38:08 +01:00
244 changed files with 17304 additions and 3509 deletions

View File

@@ -124,7 +124,7 @@ func (c *Client) Run(platformFiles PlatformFiles, urlOpener URLOpener, isAndroid
// todo do not throw error in case of cancelled context
ctx = internal.CtxInitState(ctx)
c.connectClient = internal.NewConnectClient(ctx, cfg, c.recorder, false)
c.connectClient = internal.NewConnectClient(ctx, cfg, c.recorder)
return c.connectClient.RunOnAndroid(c.tunAdapter, c.iFaceDiscover, c.networkChangeListener, slices.Clone(dns.items), dnsReadyListener, stateFile)
}
@@ -157,7 +157,7 @@ func (c *Client) RunWithoutLogin(platformFiles PlatformFiles, dns *DNSList, dnsR
// todo do not throw error in case of cancelled context
ctx = internal.CtxInitState(ctx)
c.connectClient = internal.NewConnectClient(ctx, cfg, c.recorder, false)
c.connectClient = internal.NewConnectClient(ctx, cfg, c.recorder)
return c.connectClient.RunOnAndroid(c.tunAdapter, c.iFaceDiscover, c.networkChangeListener, slices.Clone(dns.items), dnsReadyListener, stateFile)
}

View File

@@ -22,20 +22,24 @@ import (
var pinRegexp = regexp.MustCompile(`^\d{6}$`)
var (
exposePin string
exposePassword string
exposeUserGroups []string
exposeDomain string
exposeNamePrefix string
exposeProtocol string
exposePin string
exposePassword string
exposeUserGroups []string
exposeDomain string
exposeNamePrefix string
exposeProtocol string
exposeExternalPort uint16
)
var exposeCmd = &cobra.Command{
Use: "expose <port>",
Short: "Expose a local port via the NetBird reverse proxy",
Args: cobra.ExactArgs(1),
Example: "netbird expose --with-password safe-pass 8080",
RunE: exposeFn,
Use: "expose <port>",
Short: "Expose a local port via the NetBird reverse proxy",
Args: cobra.ExactArgs(1),
Example: ` netbird expose --with-password safe-pass 8080
netbird expose --protocol tcp 5432
netbird expose --protocol tcp --with-external-port 5433 5432
netbird expose --protocol tls --with-custom-domain tls.example.com 4443`,
RunE: exposeFn,
}
func init() {
@@ -44,7 +48,52 @@ func init() {
exposeCmd.Flags().StringSliceVar(&exposeUserGroups, "with-user-groups", nil, "Restrict access to specific user groups with SSO (e.g. --with-user-groups devops,Backend)")
exposeCmd.Flags().StringVar(&exposeDomain, "with-custom-domain", "", "Custom domain for the exposed service, must be configured to your account (e.g. --with-custom-domain myapp.example.com)")
exposeCmd.Flags().StringVar(&exposeNamePrefix, "with-name-prefix", "", "Prefix for the generated service name (e.g. --with-name-prefix my-app)")
exposeCmd.Flags().StringVar(&exposeProtocol, "protocol", "http", "Protocol to use, http/https is supported (e.g. --protocol http)")
exposeCmd.Flags().StringVar(&exposeProtocol, "protocol", "http", "Protocol to use: http, https, tcp, udp, or tls (e.g. --protocol tcp)")
exposeCmd.Flags().Uint16Var(&exposeExternalPort, "with-external-port", 0, "Public-facing external port on the proxy cluster (defaults to the target port for L4)")
}
// isClusterProtocol reports whether protocol is an L4/TLS cluster
// protocol. These protocols reject HTTP-style auth flags
// (--with-pin, --with-password, --with-user-groups).
func isClusterProtocol(protocol string) bool {
	p := strings.ToLower(protocol)
	return p == "tcp" || p == "udp" || p == "tls"
}
// isPortBasedProtocol reports whether protocol is a purely port-based
// protocol (TCP/UDP), for which displaying a domain does not apply.
// TLS is excluded: it uses SNI and therefore does have a domain.
func isPortBasedProtocol(protocol string) bool {
	p := strings.ToLower(protocol)
	return p == "tcp" || p == "udp"
}
// extractPort returns the port portion of a service URL like
// "tcp://host:12345", or the given fallback formatted as a string when
// no valid numeric port can be found.
//
// Fixes over the naive last-colon scan: a path/query suffix no longer
// leaks into the result ("tcp://h:123/x" -> "123", not "123/x"), and a
// bracket-less IPv6 address no longer yields a bogus port ("::1" would
// otherwise return "1").
func extractPort(serviceURL string, fallback uint16) string {
	hostport := serviceURL
	// Strip the scheme prefix, if any.
	if idx := strings.Index(hostport, "://"); idx != -1 {
		hostport = hostport[idx+3:]
	}
	// Drop any path, query, or fragment suffix before scanning for the port.
	if idx := strings.IndexAny(hostport, "/?#"); idx != -1 {
		hostport = hostport[:idx]
	}
	if i := strings.LastIndex(hostport, ":"); i != -1 {
		// Only accept a well-formed numeric port; "1]" from a bracketed
		// host without a port, or a bare IPv6 tail, falls through.
		if p := hostport[i+1:]; p != "" && !strings.Contains(p, "]") {
			if _, err := strconv.ParseUint(p, 10, 16); err == nil {
				return p
			}
		}
	}
	return strconv.FormatUint(uint64(fallback), 10)
}
// resolveExternalPort returns the effective external port for the
// exposed service: the --with-external-port value when the user set
// one, otherwise the target port itself.
func resolveExternalPort(targetPort uint64) uint16 {
	if exposeExternalPort == 0 {
		return uint16(targetPort)
	}
	return exposeExternalPort
}
func validateExposeFlags(cmd *cobra.Command, portStr string) (uint64, error) {
@@ -57,7 +106,15 @@ func validateExposeFlags(cmd *cobra.Command, portStr string) (uint64, error) {
}
if !isProtocolValid(exposeProtocol) {
return 0, fmt.Errorf("unsupported protocol %q: only 'http' or 'https' are supported", exposeProtocol)
return 0, fmt.Errorf("unsupported protocol %q: must be http, https, tcp, udp, or tls", exposeProtocol)
}
if isClusterProtocol(exposeProtocol) {
if exposePin != "" || exposePassword != "" || len(exposeUserGroups) > 0 {
return 0, fmt.Errorf("auth flags (--with-pin, --with-password, --with-user-groups) are not supported for %s protocol", exposeProtocol)
}
} else if cmd.Flags().Changed("with-external-port") {
return 0, fmt.Errorf("--with-external-port is not supported for %s protocol", exposeProtocol)
}
if exposePin != "" && !pinRegexp.MatchString(exposePin) {
@@ -76,7 +133,12 @@ func validateExposeFlags(cmd *cobra.Command, portStr string) (uint64, error) {
}
func isProtocolValid(exposeProtocol string) bool {
return strings.ToLower(exposeProtocol) == "http" || strings.ToLower(exposeProtocol) == "https"
switch strings.ToLower(exposeProtocol) {
case "http", "https", "tcp", "udp", "tls":
return true
default:
return false
}
}
func exposeFn(cmd *cobra.Command, args []string) error {
@@ -123,7 +185,7 @@ func exposeFn(cmd *cobra.Command, args []string) error {
return err
}
stream, err := client.ExposeService(ctx, &proto.ExposeServiceRequest{
req := &proto.ExposeServiceRequest{
Port: uint32(port),
Protocol: protocol,
Pin: exposePin,
@@ -131,7 +193,12 @@ func exposeFn(cmd *cobra.Command, args []string) error {
UserGroups: exposeUserGroups,
Domain: exposeDomain,
NamePrefix: exposeNamePrefix,
})
}
if isClusterProtocol(exposeProtocol) {
req.ListenPort = uint32(resolveExternalPort(port))
}
stream, err := client.ExposeService(ctx, req)
if err != nil {
return fmt.Errorf("expose service: %w", err)
}
@@ -149,8 +216,14 @@ func toExposeProtocol(exposeProtocol string) (proto.ExposeProtocol, error) {
return proto.ExposeProtocol_EXPOSE_HTTP, nil
case "https":
return proto.ExposeProtocol_EXPOSE_HTTPS, nil
case "tcp":
return proto.ExposeProtocol_EXPOSE_TCP, nil
case "udp":
return proto.ExposeProtocol_EXPOSE_UDP, nil
case "tls":
return proto.ExposeProtocol_EXPOSE_TLS, nil
default:
return 0, fmt.Errorf("unsupported protocol %q: only 'http' or 'https' are supported", exposeProtocol)
return 0, fmt.Errorf("unsupported protocol %q: must be http, https, tcp, udp, or tls", exposeProtocol)
}
}
@@ -160,20 +233,33 @@ func handleExposeReady(cmd *cobra.Command, stream proto.DaemonService_ExposeServ
return fmt.Errorf("receive expose event: %w", err)
}
switch e := event.Event.(type) {
case *proto.ExposeServiceEvent_Ready:
cmd.Println("Service exposed successfully!")
cmd.Printf(" Name: %s\n", e.Ready.ServiceName)
cmd.Printf(" URL: %s\n", e.Ready.ServiceUrl)
cmd.Printf(" Domain: %s\n", e.Ready.Domain)
cmd.Printf(" Protocol: %s\n", exposeProtocol)
cmd.Printf(" Port: %d\n", port)
cmd.Println()
cmd.Println("Press Ctrl+C to stop exposing.")
return nil
default:
ready, ok := event.Event.(*proto.ExposeServiceEvent_Ready)
if !ok {
return fmt.Errorf("unexpected expose event: %T", event.Event)
}
printExposeReady(cmd, ready.Ready, port)
return nil
}
// printExposeReady prints the user-facing summary after a service has been
// exposed: name, URL/domain (when applicable), protocol, and the
// internal/external port mapping for cluster (L4/TLS) protocols.
// It reads the package-level exposeProtocol and exposeExternalPort flags.
func printExposeReady(cmd *cobra.Command, r *proto.ExposeServiceReady, port uint64) {
cmd.Println("Service exposed successfully!")
cmd.Printf(" Name: %s\n", r.ServiceName)
// URL may be empty for some protocols; only print it when present.
if r.ServiceUrl != "" {
cmd.Printf(" URL: %s\n", r.ServiceUrl)
}
// Pure port-based protocols (TCP/UDP) have no meaningful domain to show.
if r.Domain != "" && !isPortBasedProtocol(exposeProtocol) {
cmd.Printf(" Domain: %s\n", r.Domain)
}
cmd.Printf(" Protocol: %s\n", exposeProtocol)
cmd.Printf(" Internal: %d\n", port)
// For L4/TLS protocols show the public-facing port, preferring the one
// embedded in the service URL over the locally resolved value.
if isClusterProtocol(exposeProtocol) {
cmd.Printf(" External: %s\n", extractPort(r.ServiceUrl, resolveExternalPort(port)))
}
// Warn when the user's explicitly requested external port could not be
// honored and the proxy assigned a different one.
if r.PortAutoAssigned && exposeExternalPort != 0 {
cmd.Printf("\n Note: requested port %d was reassigned\n", exposeExternalPort)
}
cmd.Println()
cmd.Println("Press Ctrl+C to stop exposing.")
}
func waitForExposeEvents(cmd *cobra.Command, ctx context.Context, stream proto.DaemonService_ExposeServiceClient) error {

View File

@@ -7,7 +7,7 @@ import (
"github.com/spf13/cobra"
"github.com/netbirdio/netbird/client/internal/updatemanager/reposign"
"github.com/netbirdio/netbird/client/internal/updater/reposign"
)
var (

View File

@@ -6,7 +6,7 @@ import (
"github.com/spf13/cobra"
"github.com/netbirdio/netbird/client/internal/updatemanager/reposign"
"github.com/netbirdio/netbird/client/internal/updater/reposign"
)
const (

View File

@@ -7,7 +7,7 @@ import (
"github.com/spf13/cobra"
"github.com/netbirdio/netbird/client/internal/updatemanager/reposign"
"github.com/netbirdio/netbird/client/internal/updater/reposign"
)
const (

View File

@@ -7,7 +7,7 @@ import (
"github.com/spf13/cobra"
"github.com/netbirdio/netbird/client/internal/updatemanager/reposign"
"github.com/netbirdio/netbird/client/internal/updater/reposign"
)
var (

View File

@@ -197,7 +197,7 @@ func runInForegroundMode(ctx context.Context, cmd *cobra.Command, activeProf *pr
r := peer.NewRecorder(config.ManagementURL.String())
r.GetFullStatus()
connectClient := internal.NewConnectClient(ctx, config, r, false)
connectClient := internal.NewConnectClient(ctx, config, r)
SetupDebugHandler(ctx, config, r, connectClient, "")
return connectClient.Run(nil, util.FindFirstLogPath(logFiles))

View File

@@ -11,7 +11,7 @@ import (
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/netbirdio/netbird/client/internal/updatemanager/installer"
"github.com/netbirdio/netbird/client/internal/updater/installer"
"github.com/netbirdio/netbird/util"
)

View File

@@ -14,6 +14,7 @@ import (
"github.com/sirupsen/logrus"
wgnetstack "golang.zx2c4.com/wireguard/tun/netstack"
"github.com/netbirdio/netbird/client/iface"
"github.com/netbirdio/netbird/client/iface/netstack"
"github.com/netbirdio/netbird/client/internal"
"github.com/netbirdio/netbird/client/internal/auth"
@@ -81,6 +82,12 @@ type Options struct {
BlockInbound bool
// WireguardPort is the port for the WireGuard interface. Use 0 for a random port.
WireguardPort *int
// MTU is the MTU for the WireGuard interface.
// Valid values are in the range 576..8192 bytes.
// If non-nil, this value overrides any value stored in the config file.
// If nil, the existing config MTU (if non-zero) is preserved; otherwise it defaults to 1280.
// Set to a higher value (e.g. 1400) if carrying QUIC or other protocols that require larger datagrams.
MTU *uint16
}
// validateCredentials checks that exactly one credential type is provided
@@ -112,6 +119,12 @@ func New(opts Options) (*Client, error) {
return nil, err
}
if opts.MTU != nil {
if err := iface.ValidateMTU(*opts.MTU); err != nil {
return nil, fmt.Errorf("invalid MTU: %w", err)
}
}
if opts.LogOutput != nil {
logrus.SetOutput(opts.LogOutput)
}
@@ -151,6 +164,7 @@ func New(opts Options) (*Client, error) {
DisableClientRoutes: &opts.DisableClientRoutes,
BlockInbound: &opts.BlockInbound,
WireguardPort: opts.WireguardPort,
MTU: opts.MTU,
}
if opts.ConfigPath != "" {
config, err = profilemanager.UpdateOrCreateConfig(input)
@@ -202,7 +216,7 @@ func (c *Client) Start(startCtx context.Context) error {
if err, _ := authClient.Login(ctx, c.setupKey, c.jwtToken); err != nil {
return fmt.Errorf("login: %w", err)
}
client := internal.NewConnectClient(ctx, c.config, c.recorder, false)
client := internal.NewConnectClient(ctx, c.config, c.recorder)
client.SetSyncResponsePersistence(true)
// either startup error (permanent backoff err) or nil err (successful engine up)

View File

@@ -27,8 +27,8 @@ import (
"github.com/netbirdio/netbird/client/internal/profilemanager"
"github.com/netbirdio/netbird/client/internal/statemanager"
"github.com/netbirdio/netbird/client/internal/stdnet"
"github.com/netbirdio/netbird/client/internal/updatemanager"
"github.com/netbirdio/netbird/client/internal/updatemanager/installer"
"github.com/netbirdio/netbird/client/internal/updater"
"github.com/netbirdio/netbird/client/internal/updater/installer"
nbnet "github.com/netbirdio/netbird/client/net"
cProto "github.com/netbirdio/netbird/client/proto"
"github.com/netbirdio/netbird/client/ssh"
@@ -44,13 +44,13 @@ import (
)
type ConnectClient struct {
ctx context.Context
config *profilemanager.Config
statusRecorder *peer.Status
doInitialAutoUpdate bool
ctx context.Context
config *profilemanager.Config
statusRecorder *peer.Status
engine *Engine
engineMutex sync.Mutex
engine *Engine
engineMutex sync.Mutex
updateManager *updater.Manager
persistSyncResponse bool
}
@@ -59,17 +59,19 @@ func NewConnectClient(
ctx context.Context,
config *profilemanager.Config,
statusRecorder *peer.Status,
doInitalAutoUpdate bool,
) *ConnectClient {
return &ConnectClient{
ctx: ctx,
config: config,
statusRecorder: statusRecorder,
doInitialAutoUpdate: doInitalAutoUpdate,
engineMutex: sync.Mutex{},
ctx: ctx,
config: config,
statusRecorder: statusRecorder,
engineMutex: sync.Mutex{},
}
}
// SetUpdateManager injects the auto-update manager the client passes on to
// the engine; a nil manager disables update handling in run().
func (c *ConnectClient) SetUpdateManager(um *updater.Manager) {
c.updateManager = um
}
// Run with main logic.
func (c *ConnectClient) Run(runningChan chan struct{}, logPath string) error {
return c.run(MobileDependency{}, runningChan, logPath)
@@ -187,14 +189,13 @@ func (c *ConnectClient) run(mobileDependency MobileDependency, runningChan chan
stateManager := statemanager.New(path)
stateManager.RegisterState(&sshconfig.ShutdownState{})
updateManager, err := updatemanager.NewManager(c.statusRecorder, stateManager)
if err == nil {
updateManager.CheckUpdateSuccess(c.ctx)
if c.updateManager != nil {
c.updateManager.CheckUpdateSuccess(c.ctx)
}
inst := installer.New()
if err := inst.CleanUpInstallerFiles(); err != nil {
log.Errorf("failed to clean up temporary installer file: %v", err)
}
inst := installer.New()
if err := inst.CleanUpInstallerFiles(); err != nil {
log.Errorf("failed to clean up temporary installer file: %v", err)
}
defer c.statusRecorder.ClientStop()
@@ -308,7 +309,15 @@ func (c *ConnectClient) run(mobileDependency MobileDependency, runningChan chan
checks := loginResp.GetChecks()
c.engineMutex.Lock()
engine := NewEngine(engineCtx, cancel, signalClient, mgmClient, relayManager, engineConfig, mobileDependency, c.statusRecorder, checks, stateManager)
engine := NewEngine(engineCtx, cancel, engineConfig, EngineServices{
SignalClient: signalClient,
MgmClient: mgmClient,
RelayManager: relayManager,
StatusRecorder: c.statusRecorder,
Checks: checks,
StateManager: stateManager,
UpdateManager: c.updateManager,
}, mobileDependency)
engine.SetSyncResponsePersistence(c.persistSyncResponse)
c.engine = engine
c.engineMutex.Unlock()
@@ -318,15 +327,6 @@ func (c *ConnectClient) run(mobileDependency MobileDependency, runningChan chan
return wrapErr(err)
}
if loginResp.PeerConfig != nil && loginResp.PeerConfig.AutoUpdate != nil {
// AutoUpdate will be true when the user click on "Connect" menu on the UI
if c.doInitialAutoUpdate {
log.Infof("start engine by ui, run auto-update check")
c.engine.InitialUpdateHandling(loginResp.PeerConfig.AutoUpdate)
c.doInitialAutoUpdate = false
}
}
log.Infof("Netbird engine started, the IP is: %s", peerConfig.GetAddress())
state.Set(StatusConnected)

View File

@@ -27,7 +27,7 @@ import (
"github.com/netbirdio/netbird/client/anonymize"
"github.com/netbirdio/netbird/client/internal/peer"
"github.com/netbirdio/netbird/client/internal/profilemanager"
"github.com/netbirdio/netbird/client/internal/updatemanager/installer"
"github.com/netbirdio/netbird/client/internal/updater/installer"
nbstatus "github.com/netbirdio/netbird/client/status"
mgmProto "github.com/netbirdio/netbird/shared/management/proto"
"github.com/netbirdio/netbird/util"

View File

@@ -77,7 +77,7 @@ func (d *Resolver) ID() types.HandlerID {
return "local-resolver"
}
func (d *Resolver) ProbeAvailability() {}
func (d *Resolver) ProbeAvailability(context.Context) {}
// ServeDNS handles a DNS request
func (d *Resolver) ServeDNS(w dns.ResponseWriter, r *dns.Msg) {

View File

@@ -104,12 +104,16 @@ type DefaultServer struct {
statusRecorder *peer.Status
stateManager *statemanager.Manager
probeMu sync.Mutex
probeCancel context.CancelFunc
probeWg sync.WaitGroup
}
type handlerWithStop interface {
dns.Handler
Stop()
ProbeAvailability()
ProbeAvailability(context.Context)
ID() types.HandlerID
}
@@ -362,7 +366,13 @@ func (s *DefaultServer) DnsIP() netip.Addr {
// Stop stops the server
func (s *DefaultServer) Stop() {
s.probeMu.Lock()
if s.probeCancel != nil {
s.probeCancel()
}
s.ctxCancel()
s.probeMu.Unlock()
s.probeWg.Wait()
s.shutdownWg.Wait()
s.mux.Lock()
@@ -479,7 +489,8 @@ func (s *DefaultServer) SearchDomains() []string {
}
// ProbeAvailability tests each upstream group's servers for availability
// and deactivates the group if no server responds
// and deactivates the group if no server responds.
// If a previous probe is still running, it will be cancelled before starting a new one.
func (s *DefaultServer) ProbeAvailability() {
if val := os.Getenv(envSkipDNSProbe); val != "" {
skipProbe, err := strconv.ParseBool(val)
@@ -492,15 +503,52 @@ func (s *DefaultServer) ProbeAvailability() {
}
}
var wg sync.WaitGroup
for _, mux := range s.dnsMuxMap {
wg.Add(1)
go func(mux handlerWithStop) {
defer wg.Done()
mux.ProbeAvailability()
}(mux.handler)
s.probeMu.Lock()
// don't start probes on a stopped server
if s.ctx.Err() != nil {
s.probeMu.Unlock()
return
}
// cancel any running probe
if s.probeCancel != nil {
s.probeCancel()
s.probeCancel = nil
}
// wait for the previous probe goroutines to finish while holding
// the mutex so no other caller can start a new probe concurrently
s.probeWg.Wait()
// start a new probe
probeCtx, probeCancel := context.WithCancel(s.ctx)
s.probeCancel = probeCancel
s.probeWg.Add(1)
defer s.probeWg.Done()
// Snapshot handlers under s.mux to avoid racing with updateMux/dnsMuxMap writers.
s.mux.Lock()
handlers := make([]handlerWithStop, 0, len(s.dnsMuxMap))
for _, mux := range s.dnsMuxMap {
handlers = append(handlers, mux.handler)
}
s.mux.Unlock()
var wg sync.WaitGroup
for _, handler := range handlers {
wg.Add(1)
go func(h handlerWithStop) {
defer wg.Done()
h.ProbeAvailability(probeCtx)
}(handler)
}
s.probeMu.Unlock()
wg.Wait()
probeCancel()
}
func (s *DefaultServer) UpdateServerConfig(domains dnsconfig.ServerDomains) error {

View File

@@ -1065,7 +1065,7 @@ type mockHandler struct {
func (m *mockHandler) ServeDNS(dns.ResponseWriter, *dns.Msg) {}
func (m *mockHandler) Stop() {}
func (m *mockHandler) ProbeAvailability() {}
func (m *mockHandler) ProbeAvailability(context.Context) {}
func (m *mockHandler) ID() types.HandlerID { return types.HandlerID(m.Id) }
type mockService struct{}

View File

@@ -6,6 +6,7 @@ import (
"net"
"net/netip"
"runtime"
"strconv"
"sync"
"time"
@@ -69,7 +70,7 @@ func (s *serviceViaListener) Listen() error {
return fmt.Errorf("eval listen address: %w", err)
}
s.listenIP = s.listenIP.Unmap()
s.server.Addr = fmt.Sprintf("%s:%d", s.listenIP, s.listenPort)
s.server.Addr = net.JoinHostPort(s.listenIP.String(), strconv.Itoa(int(s.listenPort)))
log.Debugf("starting dns on %s", s.server.Addr)
go func() {
s.setListenerStatus(true)
@@ -186,7 +187,7 @@ func (s *serviceViaListener) testFreePort(port int) (netip.Addr, bool) {
}
func (s *serviceViaListener) tryToBind(ip netip.Addr, port int) bool {
addrString := fmt.Sprintf("%s:%d", ip, port)
addrString := net.JoinHostPort(ip.String(), strconv.Itoa(port))
udpAddr := net.UDPAddrFromAddrPort(netip.MustParseAddrPort(addrString))
probeListener, err := net.ListenUDP("udp", udpAddr)
if err != nil {

View File

@@ -65,6 +65,7 @@ type upstreamResolverBase struct {
mutex sync.Mutex
reactivatePeriod time.Duration
upstreamTimeout time.Duration
wg sync.WaitGroup
deactivate func(error)
reactivate func()
@@ -115,6 +116,11 @@ func (u *upstreamResolverBase) MatchSubdomains() bool {
func (u *upstreamResolverBase) Stop() {
log.Debugf("stopping serving DNS for upstreams %s", u.upstreamServers)
u.cancel()
u.mutex.Lock()
u.wg.Wait()
u.mutex.Unlock()
}
// ServeDNS handles a DNS request
@@ -260,16 +266,10 @@ func formatFailures(failures []upstreamFailure) string {
// ProbeAvailability tests all upstream servers simultaneously and
// disables the resolver if none work
func (u *upstreamResolverBase) ProbeAvailability() {
func (u *upstreamResolverBase) ProbeAvailability(ctx context.Context) {
u.mutex.Lock()
defer u.mutex.Unlock()
select {
case <-u.ctx.Done():
return
default:
}
// avoid probe if upstreams could resolve at least one query
if u.successCount.Load() > 0 {
return
@@ -279,31 +279,39 @@ func (u *upstreamResolverBase) ProbeAvailability() {
var mu sync.Mutex
var wg sync.WaitGroup
var errors *multierror.Error
var errs *multierror.Error
for _, upstream := range u.upstreamServers {
upstream := upstream
wg.Add(1)
go func() {
go func(upstream netip.AddrPort) {
defer wg.Done()
err := u.testNameserver(upstream, 500*time.Millisecond)
err := u.testNameserver(u.ctx, ctx, upstream, 500*time.Millisecond)
if err != nil {
errors = multierror.Append(errors, err)
mu.Lock()
errs = multierror.Append(errs, err)
mu.Unlock()
log.Warnf("probing upstream nameserver %s: %s", upstream, err)
return
}
mu.Lock()
defer mu.Unlock()
success = true
}()
mu.Unlock()
}(upstream)
}
wg.Wait()
select {
case <-ctx.Done():
return
case <-u.ctx.Done():
return
default:
}
// didn't find a working upstream server, let's disable and try later
if !success {
u.disable(errors.ErrorOrNil())
u.disable(errs.ErrorOrNil())
if u.statusRecorder == nil {
return
@@ -339,7 +347,7 @@ func (u *upstreamResolverBase) waitUntilResponse() {
}
for _, upstream := range u.upstreamServers {
if err := u.testNameserver(upstream, probeTimeout); err != nil {
if err := u.testNameserver(u.ctx, nil, upstream, probeTimeout); err != nil {
log.Tracef("upstream check for %s: %s", upstream, err)
} else {
// at least one upstream server is available, stop probing
@@ -364,7 +372,9 @@ func (u *upstreamResolverBase) waitUntilResponse() {
log.Infof("upstreams %s are responsive again. Adding them back to system", u.upstreamServersString())
u.successCount.Add(1)
u.reactivate()
u.mutex.Lock()
u.disabled = false
u.mutex.Unlock()
}
// isTimeout returns true if the given error is a network timeout error.
@@ -387,7 +397,11 @@ func (u *upstreamResolverBase) disable(err error) {
u.successCount.Store(0)
u.deactivate(err)
u.disabled = true
go u.waitUntilResponse()
u.wg.Add(1)
go func() {
defer u.wg.Done()
u.waitUntilResponse()
}()
}
func (u *upstreamResolverBase) upstreamServersString() string {
@@ -398,13 +412,18 @@ func (u *upstreamResolverBase) upstreamServersString() string {
return strings.Join(servers, ", ")
}
func (u *upstreamResolverBase) testNameserver(server netip.AddrPort, timeout time.Duration) error {
ctx, cancel := context.WithTimeout(u.ctx, timeout)
func (u *upstreamResolverBase) testNameserver(baseCtx context.Context, externalCtx context.Context, server netip.AddrPort, timeout time.Duration) error {
mergedCtx, cancel := context.WithTimeout(baseCtx, timeout)
defer cancel()
if externalCtx != nil {
stop2 := context.AfterFunc(externalCtx, cancel)
defer stop2()
}
r := new(dns.Msg).SetQuestion(testRecord, dns.TypeSOA)
_, _, err := u.upstreamClient.exchange(ctx, server.String(), r)
_, _, err := u.upstreamClient.exchange(mergedCtx, server.String(), r)
return err
}

View File

@@ -188,7 +188,7 @@ func TestUpstreamResolver_DeactivationReactivation(t *testing.T) {
reactivated = true
}
resolver.ProbeAvailability()
resolver.ProbeAvailability(context.TODO())
if !failed {
t.Errorf("expected that resolving was deactivated")

View File

@@ -51,7 +51,7 @@ import (
"github.com/netbirdio/netbird/client/internal/routemanager"
"github.com/netbirdio/netbird/client/internal/routemanager/systemops"
"github.com/netbirdio/netbird/client/internal/statemanager"
"github.com/netbirdio/netbird/client/internal/updatemanager"
"github.com/netbirdio/netbird/client/internal/updater"
"github.com/netbirdio/netbird/client/jobexec"
cProto "github.com/netbirdio/netbird/client/proto"
"github.com/netbirdio/netbird/client/system"
@@ -79,7 +79,6 @@ const (
var ErrResetConnection = fmt.Errorf("reset connection")
// EngineConfig is a config for the Engine
type EngineConfig struct {
WgPort int
WgIfaceName string
@@ -141,6 +140,17 @@ type EngineConfig struct {
LogPath string
}
// EngineServices holds the external service dependencies required by the Engine.
// Grouping them in one struct keeps NewEngine's signature stable as
// dependencies are added or removed.
type EngineServices struct {
// SignalClient exchanges peer signaling messages via the Signal service.
SignalClient signal.Client
// MgmClient talks to the Management service (login, sync, network map).
MgmClient mgm.Client
// RelayManager manages relay connections used when direct paths fail.
RelayManager *relayClient.Manager
// StatusRecorder records peer/engine status for reporting.
StatusRecorder *peer.Status
// Checks are posture checks received from Management at login.
Checks []*mgmProto.Checks
// StateManager persists engine state across restarts.
StateManager *statemanager.Manager
// UpdateManager drives client auto-updates; may be nil when disabled.
UpdateManager *updater.Manager
}
// Engine is a mechanism responsible for reacting on Signal and Management stream events and managing connections to the remote peers.
type Engine struct {
// signal is a Signal Service client
@@ -209,7 +219,7 @@ type Engine struct {
flowManager nftypes.FlowManager
// auto-update
updateManager *updatemanager.Manager
updateManager *updater.Manager
// WireGuard interface monitor
wgIfaceMonitor *WGIfaceMonitor
@@ -239,22 +249,17 @@ type localIpUpdater interface {
func NewEngine(
clientCtx context.Context,
clientCancel context.CancelFunc,
signalClient signal.Client,
mgmClient mgm.Client,
relayManager *relayClient.Manager,
config *EngineConfig,
services EngineServices,
mobileDep MobileDependency,
statusRecorder *peer.Status,
checks []*mgmProto.Checks,
stateManager *statemanager.Manager,
) *Engine {
engine := &Engine{
clientCtx: clientCtx,
clientCancel: clientCancel,
signal: signalClient,
signaler: peer.NewSignaler(signalClient, config.WgPrivateKey),
mgmClient: mgmClient,
relayManager: relayManager,
signal: services.SignalClient,
signaler: peer.NewSignaler(services.SignalClient, config.WgPrivateKey),
mgmClient: services.MgmClient,
relayManager: services.RelayManager,
peerStore: peerstore.NewConnStore(),
syncMsgMux: &sync.Mutex{},
config: config,
@@ -262,11 +267,12 @@ func NewEngine(
STUNs: []*stun.URI{},
TURNs: []*stun.URI{},
networkSerial: 0,
statusRecorder: statusRecorder,
stateManager: stateManager,
checks: checks,
statusRecorder: services.StatusRecorder,
stateManager: services.StateManager,
checks: services.Checks,
probeStunTurn: relay.NewStunTurnProbe(relay.DefaultCacheTTL),
jobExecutor: jobexec.NewExecutor(),
updateManager: services.UpdateManager,
}
log.Infof("I am: %s", config.WgPrivateKey.PublicKey().String())
@@ -309,7 +315,7 @@ func (e *Engine) Stop() error {
}
if e.updateManager != nil {
e.updateManager.Stop()
e.updateManager.SetDownloadOnly()
}
log.Info("cleaning up status recorder states")
@@ -559,13 +565,6 @@ func (e *Engine) Start(netbirdConfig *mgmProto.NetbirdConfig, mgmtURL *url.URL)
return nil
}
func (e *Engine) InitialUpdateHandling(autoUpdateSettings *mgmProto.AutoUpdateSettings) {
e.syncMsgMux.Lock()
defer e.syncMsgMux.Unlock()
e.handleAutoUpdateVersion(autoUpdateSettings, true)
}
func (e *Engine) createFirewall() error {
if e.config.DisableFirewall {
log.Infof("firewall is disabled")
@@ -793,39 +792,22 @@ func (e *Engine) PopulateNetbirdConfig(netbirdConfig *mgmProto.NetbirdConfig, mg
return nil
}
func (e *Engine) handleAutoUpdateVersion(autoUpdateSettings *mgmProto.AutoUpdateSettings, initialCheck bool) {
func (e *Engine) handleAutoUpdateVersion(autoUpdateSettings *mgmProto.AutoUpdateSettings) {
if e.updateManager == nil {
return
}
if autoUpdateSettings == nil {
return
}
disabled := autoUpdateSettings.Version == disableAutoUpdate
// stop and cleanup if disabled
if e.updateManager != nil && disabled {
log.Infof("auto-update is disabled, stopping update manager")
e.updateManager.Stop()
e.updateManager = nil
if autoUpdateSettings.Version == disableAutoUpdate {
log.Infof("auto-update is disabled")
e.updateManager.SetDownloadOnly()
return
}
// Skip check unless AlwaysUpdate is enabled or this is the initial check at startup
if !autoUpdateSettings.AlwaysUpdate && !initialCheck {
log.Debugf("skipping auto-update check, AlwaysUpdate is false and this is not the initial check")
return
}
// Start manager if needed
if e.updateManager == nil {
log.Infof("starting auto-update manager")
updateManager, err := updatemanager.NewManager(e.statusRecorder, e.stateManager)
if err != nil {
return
}
e.updateManager = updateManager
e.updateManager.Start(e.ctx)
}
log.Infof("handling auto-update version: %s", autoUpdateSettings.Version)
e.updateManager.SetVersion(autoUpdateSettings.Version)
e.updateManager.SetVersion(autoUpdateSettings.Version, autoUpdateSettings.AlwaysUpdate)
}
func (e *Engine) handleSync(update *mgmProto.SyncResponse) error {
@@ -842,7 +824,7 @@ func (e *Engine) handleSync(update *mgmProto.SyncResponse) error {
}
if update.NetworkMap != nil && update.NetworkMap.PeerConfig != nil {
e.handleAutoUpdateVersion(update.NetworkMap.PeerConfig.AutoUpdate, false)
e.handleAutoUpdateVersion(update.NetworkMap.PeerConfig.AutoUpdate)
}
if update.GetNetbirdConfig() != nil {
@@ -1315,8 +1297,7 @@ func (e *Engine) updateNetworkMap(networkMap *mgmProto.NetworkMap) error {
// Test received (upstream) servers for availability right away instead of upon usage.
// If no server of a server group responds this will disable the respective handler and retry later.
e.dnsServer.ProbeAvailability()
go e.dnsServer.ProbeAvailability()
return nil
}

View File

@@ -251,9 +251,6 @@ func TestEngine_SSH(t *testing.T) {
relayMgr := relayClient.NewManager(ctx, nil, key.PublicKey().String(), iface.DefaultMTU)
engine := NewEngine(
ctx, cancel,
&signal.MockClient{},
&mgmt.MockClient{},
relayMgr,
&EngineConfig{
WgIfaceName: "utun101",
WgAddr: "100.64.0.1/24",
@@ -263,10 +260,13 @@ func TestEngine_SSH(t *testing.T) {
MTU: iface.DefaultMTU,
SSHKey: sshKey,
},
EngineServices{
SignalClient: &signal.MockClient{},
MgmClient: &mgmt.MockClient{},
RelayManager: relayMgr,
StatusRecorder: peer.NewRecorder("https://mgm"),
},
MobileDependency{},
peer.NewRecorder("https://mgm"),
nil,
nil,
)
engine.dnsServer = &dns.MockServer{
@@ -428,13 +428,18 @@ func TestEngine_UpdateNetworkMap(t *testing.T) {
defer cancel()
relayMgr := relayClient.NewManager(ctx, nil, key.PublicKey().String(), iface.DefaultMTU)
engine := NewEngine(ctx, cancel, &signal.MockClient{}, &mgmt.MockClient{}, relayMgr, &EngineConfig{
engine := NewEngine(ctx, cancel, &EngineConfig{
WgIfaceName: "utun102",
WgAddr: "100.64.0.1/24",
WgPrivateKey: key,
WgPort: 33100,
MTU: iface.DefaultMTU,
}, MobileDependency{}, peer.NewRecorder("https://mgm"), nil, nil)
}, EngineServices{
SignalClient: &signal.MockClient{},
MgmClient: &mgmt.MockClient{},
RelayManager: relayMgr,
StatusRecorder: peer.NewRecorder("https://mgm"),
}, MobileDependency{})
wgIface := &MockWGIface{
NameFunc: func() string { return "utun102" },
@@ -647,13 +652,18 @@ func TestEngine_Sync(t *testing.T) {
return nil
}
relayMgr := relayClient.NewManager(ctx, nil, key.PublicKey().String(), iface.DefaultMTU)
engine := NewEngine(ctx, cancel, &signal.MockClient{}, &mgmt.MockClient{SyncFunc: syncFunc}, relayMgr, &EngineConfig{
engine := NewEngine(ctx, cancel, &EngineConfig{
WgIfaceName: "utun103",
WgAddr: "100.64.0.1/24",
WgPrivateKey: key,
WgPort: 33100,
MTU: iface.DefaultMTU,
}, MobileDependency{}, peer.NewRecorder("https://mgm"), nil, nil)
}, EngineServices{
SignalClient: &signal.MockClient{},
MgmClient: &mgmt.MockClient{SyncFunc: syncFunc},
RelayManager: relayMgr,
StatusRecorder: peer.NewRecorder("https://mgm"),
}, MobileDependency{})
engine.ctx = ctx
engine.dnsServer = &dns.MockServer{
@@ -812,13 +822,18 @@ func TestEngine_UpdateNetworkMapWithRoutes(t *testing.T) {
wgAddr := fmt.Sprintf("100.66.%d.1/24", n)
relayMgr := relayClient.NewManager(ctx, nil, key.PublicKey().String(), iface.DefaultMTU)
engine := NewEngine(ctx, cancel, &signal.MockClient{}, &mgmt.MockClient{}, relayMgr, &EngineConfig{
engine := NewEngine(ctx, cancel, &EngineConfig{
WgIfaceName: wgIfaceName,
WgAddr: wgAddr,
WgPrivateKey: key,
WgPort: 33100,
MTU: iface.DefaultMTU,
}, MobileDependency{}, peer.NewRecorder("https://mgm"), nil, nil)
}, EngineServices{
SignalClient: &signal.MockClient{},
MgmClient: &mgmt.MockClient{},
RelayManager: relayMgr,
StatusRecorder: peer.NewRecorder("https://mgm"),
}, MobileDependency{})
engine.ctx = ctx
newNet, err := stdnet.NewNet(context.Background(), nil)
if err != nil {
@@ -1014,13 +1029,18 @@ func TestEngine_UpdateNetworkMapWithDNSUpdate(t *testing.T) {
wgAddr := fmt.Sprintf("100.66.%d.1/24", n)
relayMgr := relayClient.NewManager(ctx, nil, key.PublicKey().String(), iface.DefaultMTU)
engine := NewEngine(ctx, cancel, &signal.MockClient{}, &mgmt.MockClient{}, relayMgr, &EngineConfig{
engine := NewEngine(ctx, cancel, &EngineConfig{
WgIfaceName: wgIfaceName,
WgAddr: wgAddr,
WgPrivateKey: key,
WgPort: 33100,
MTU: iface.DefaultMTU,
}, MobileDependency{}, peer.NewRecorder("https://mgm"), nil, nil)
}, EngineServices{
SignalClient: &signal.MockClient{},
MgmClient: &mgmt.MockClient{},
RelayManager: relayMgr,
StatusRecorder: peer.NewRecorder("https://mgm"),
}, MobileDependency{})
engine.ctx = ctx
newNet, err := stdnet.NewNet(context.Background(), nil)
@@ -1546,7 +1566,12 @@ func createEngine(ctx context.Context, cancel context.CancelFunc, setupKey strin
}
relayMgr := relayClient.NewManager(ctx, nil, key.PublicKey().String(), iface.DefaultMTU)
e, err := NewEngine(ctx, cancel, signalClient, mgmtClient, relayMgr, conf, MobileDependency{}, peer.NewRecorder("https://mgm"), nil, nil), nil
e, err := NewEngine(ctx, cancel, conf, EngineServices{
SignalClient: signalClient,
MgmClient: mgmtClient,
RelayManager: relayMgr,
StatusRecorder: peer.NewRecorder("https://mgm"),
}, MobileDependency{}), nil
e.ctx = ctx
return e, err
}

View File

@@ -12,9 +12,10 @@ const renewTimeout = 10 * time.Second
// Response holds the response from exposing a service.
type Response struct {
ServiceName string
ServiceURL string
Domain string
ServiceName string
ServiceURL string
Domain string
PortAutoAssigned bool
}
type Request struct {
@@ -25,6 +26,7 @@ type Request struct {
Pin string
Password string
UserGroups []string
ListenPort uint16
}
type ManagementClient interface {

View File

@@ -15,6 +15,7 @@ func NewRequest(req *daemonProto.ExposeServiceRequest) *Request {
UserGroups: req.UserGroups,
Domain: req.Domain,
NamePrefix: req.NamePrefix,
ListenPort: uint16(req.ListenPort),
}
}
@@ -27,13 +28,15 @@ func toClientExposeRequest(req Request) mgm.ExposeRequest {
Pin: req.Pin,
Password: req.Password,
UserGroups: req.UserGroups,
ListenPort: req.ListenPort,
}
}
func fromClientExposeResponse(response *mgm.ExposeResponse) *Response {
return &Response{
ServiceName: response.ServiceName,
Domain: response.Domain,
ServiceURL: response.ServiceURL,
ServiceName: response.ServiceName,
Domain: response.Domain,
ServiceURL: response.ServiceURL,
PortAutoAssigned: response.PortAutoAssigned,
}
}

View File

@@ -3,7 +3,9 @@ package client
import (
"context"
"fmt"
"net"
"reflect"
"strconv"
"time"
log "github.com/sirupsen/logrus"
@@ -564,7 +566,7 @@ func HandlerFromRoute(params common.HandlerParams) RouteHandler {
return dnsinterceptor.New(params)
case handlerTypeDynamic:
dns := nbdns.NewServiceViaMemory(params.WgInterface)
dnsAddr := fmt.Sprintf("%s:%d", dns.RuntimeIP(), dns.RuntimePort())
dnsAddr := net.JoinHostPort(dns.RuntimeIP().String(), strconv.Itoa(dns.RuntimePort()))
return dynamic.NewRoute(params, dnsAddr)
default:
return static.NewRoute(params)

View File

@@ -4,8 +4,10 @@ import (
"context"
"errors"
"fmt"
"net"
"net/netip"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
@@ -249,7 +251,7 @@ func (d *DnsInterceptor) ServeDNS(w dns.ResponseWriter, r *dns.Msg) {
r.MsgHdr.AuthenticatedData = true
}
upstream := fmt.Sprintf("%s:%d", upstreamIP.String(), uint16(d.forwarderPort.Load()))
upstream := net.JoinHostPort(upstreamIP.String(), strconv.FormatUint(uint64(d.forwarderPort.Load()), 10))
ctx, cancel := context.WithTimeout(context.Background(), dnsTimeout)
defer cancel()

View File

@@ -1,214 +0,0 @@
//go:build windows || darwin
package updatemanager
import (
"context"
"fmt"
"path"
"testing"
"time"
v "github.com/hashicorp/go-version"
"github.com/netbirdio/netbird/client/internal/peer"
"github.com/netbirdio/netbird/client/internal/statemanager"
)
type versionUpdateMock struct {
latestVersion *v.Version
onUpdate func()
}
func (v versionUpdateMock) StopWatch() {}
func (v versionUpdateMock) SetDaemonVersion(newVersion string) bool {
return false
}
func (v *versionUpdateMock) SetOnUpdateListener(updateFn func()) {
v.onUpdate = updateFn
}
func (v versionUpdateMock) LatestVersion() *v.Version {
return v.latestVersion
}
func (v versionUpdateMock) StartFetcher() {}
func Test_LatestVersion(t *testing.T) {
testMatrix := []struct {
name string
daemonVersion string
initialLatestVersion *v.Version
latestVersion *v.Version
shouldUpdateInit bool
shouldUpdateLater bool
}{
{
name: "Should only trigger update once due to time between triggers being < 5 Minutes",
daemonVersion: "1.0.0",
initialLatestVersion: v.Must(v.NewSemver("1.0.1")),
latestVersion: v.Must(v.NewSemver("1.0.2")),
shouldUpdateInit: true,
shouldUpdateLater: false,
},
{
name: "Shouldn't update initially, but should update as soon as latest version is fetched",
daemonVersion: "1.0.0",
initialLatestVersion: nil,
latestVersion: v.Must(v.NewSemver("1.0.1")),
shouldUpdateInit: false,
shouldUpdateLater: true,
},
}
for idx, c := range testMatrix {
mockUpdate := &versionUpdateMock{latestVersion: c.initialLatestVersion}
tmpFile := path.Join(t.TempDir(), fmt.Sprintf("update-test-%d.json", idx))
m, _ := newManager(peer.NewRecorder(""), statemanager.New(tmpFile))
m.update = mockUpdate
targetVersionChan := make(chan string, 1)
m.triggerUpdateFn = func(ctx context.Context, targetVersion string) error {
targetVersionChan <- targetVersion
return nil
}
m.currentVersion = c.daemonVersion
m.Start(context.Background())
m.SetVersion("latest")
var triggeredInit bool
select {
case targetVersion := <-targetVersionChan:
if targetVersion != c.initialLatestVersion.String() {
t.Errorf("%s: Initial update version mismatch, expected %v, got %v", c.name, c.initialLatestVersion.String(), targetVersion)
}
triggeredInit = true
case <-time.After(10 * time.Millisecond):
triggeredInit = false
}
if triggeredInit != c.shouldUpdateInit {
t.Errorf("%s: Initial update trigger mismatch, expected %v, got %v", c.name, c.shouldUpdateInit, triggeredInit)
}
mockUpdate.latestVersion = c.latestVersion
mockUpdate.onUpdate()
var triggeredLater bool
select {
case targetVersion := <-targetVersionChan:
if targetVersion != c.latestVersion.String() {
t.Errorf("%s: Update version mismatch, expected %v, got %v", c.name, c.latestVersion.String(), targetVersion)
}
triggeredLater = true
case <-time.After(10 * time.Millisecond):
triggeredLater = false
}
if triggeredLater != c.shouldUpdateLater {
t.Errorf("%s: Update trigger mismatch, expected %v, got %v", c.name, c.shouldUpdateLater, triggeredLater)
}
m.Stop()
}
}
func Test_HandleUpdate(t *testing.T) {
testMatrix := []struct {
name string
daemonVersion string
latestVersion *v.Version
expectedVersion string
shouldUpdate bool
}{
{
name: "Update to a specific version should update regardless of if latestVersion is available yet",
daemonVersion: "0.55.0",
latestVersion: nil,
expectedVersion: "0.56.0",
shouldUpdate: true,
},
{
name: "Update to specific version should not update if version matches",
daemonVersion: "0.55.0",
latestVersion: nil,
expectedVersion: "0.55.0",
shouldUpdate: false,
},
{
name: "Update to specific version should not update if current version is newer",
daemonVersion: "0.55.0",
latestVersion: nil,
expectedVersion: "0.54.0",
shouldUpdate: false,
},
{
name: "Update to latest version should update if latest is newer",
daemonVersion: "0.55.0",
latestVersion: v.Must(v.NewSemver("0.56.0")),
expectedVersion: "latest",
shouldUpdate: true,
},
{
name: "Update to latest version should not update if latest == current",
daemonVersion: "0.56.0",
latestVersion: v.Must(v.NewSemver("0.56.0")),
expectedVersion: "latest",
shouldUpdate: false,
},
{
name: "Should not update if daemon version is invalid",
daemonVersion: "development",
latestVersion: v.Must(v.NewSemver("1.0.0")),
expectedVersion: "latest",
shouldUpdate: false,
},
{
name: "Should not update if expecting latest and latest version is unavailable",
daemonVersion: "0.55.0",
latestVersion: nil,
expectedVersion: "latest",
shouldUpdate: false,
},
{
name: "Should not update if expected version is invalid",
daemonVersion: "0.55.0",
latestVersion: nil,
expectedVersion: "development",
shouldUpdate: false,
},
}
for idx, c := range testMatrix {
tmpFile := path.Join(t.TempDir(), fmt.Sprintf("update-test-%d.json", idx))
m, _ := newManager(peer.NewRecorder(""), statemanager.New(tmpFile))
m.update = &versionUpdateMock{latestVersion: c.latestVersion}
targetVersionChan := make(chan string, 1)
m.triggerUpdateFn = func(ctx context.Context, targetVersion string) error {
targetVersionChan <- targetVersion
return nil
}
m.currentVersion = c.daemonVersion
m.Start(context.Background())
m.SetVersion(c.expectedVersion)
var updateTriggered bool
select {
case targetVersion := <-targetVersionChan:
if c.expectedVersion == "latest" && targetVersion != c.latestVersion.String() {
t.Errorf("%s: Update version mismatch, expected %v, got %v", c.name, c.latestVersion.String(), targetVersion)
} else if c.expectedVersion != "latest" && targetVersion != c.expectedVersion {
t.Errorf("%s: Update version mismatch, expected %v, got %v", c.name, c.expectedVersion, targetVersion)
}
updateTriggered = true
case <-time.After(10 * time.Millisecond):
updateTriggered = false
}
if updateTriggered != c.shouldUpdate {
t.Errorf("%s: Update trigger mismatch, expected %v, got %v", c.name, c.shouldUpdate, updateTriggered)
}
m.Stop()
}
}

View File

@@ -1,39 +0,0 @@
//go:build !windows && !darwin
package updatemanager
import (
"context"
"fmt"
"github.com/netbirdio/netbird/client/internal/peer"
"github.com/netbirdio/netbird/client/internal/statemanager"
)
// Manager is a no-op stub for unsupported platforms
type Manager struct{}
// NewManager returns a no-op manager for unsupported platforms
func NewManager(statusRecorder *peer.Status, stateManager *statemanager.Manager) (*Manager, error) {
return nil, fmt.Errorf("update manager is not supported on this platform")
}
// CheckUpdateSuccess is a no-op on unsupported platforms
func (m *Manager) CheckUpdateSuccess(ctx context.Context) {
// no-op
}
// Start is a no-op on unsupported platforms
func (m *Manager) Start(ctx context.Context) {
// no-op
}
// SetVersion is a no-op on unsupported platforms
func (m *Manager) SetVersion(expectedVersion string) {
// no-op
}
// Stop is a no-op on unsupported platforms
func (m *Manager) Stop() {
// no-op
}

View File

@@ -1,4 +1,4 @@
// Package updatemanager provides automatic update management for the NetBird client.
// Package updater provides automatic update management for the NetBird client.
// It monitors for new versions, handles update triggers from management server directives,
// and orchestrates the download and installation of client updates.
//
@@ -32,4 +32,4 @@
//
// This enables verification of successful updates and appropriate user notification
// after the client restarts with the new version.
package updatemanager
package updater

View File

@@ -16,8 +16,8 @@ import (
goversion "github.com/hashicorp/go-version"
log "github.com/sirupsen/logrus"
"github.com/netbirdio/netbird/client/internal/updatemanager/downloader"
"github.com/netbirdio/netbird/client/internal/updatemanager/reposign"
"github.com/netbirdio/netbird/client/internal/updater/downloader"
"github.com/netbirdio/netbird/client/internal/updater/reposign"
)
type Installer struct {

View File

@@ -203,7 +203,10 @@ func (rh *ResultHandler) write(result Result) error {
func (rh *ResultHandler) cleanup() error {
err := os.Remove(rh.resultFile)
if err != nil && !os.IsNotExist(err) {
if err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
log.Debugf("delete installer result file: %s", rh.resultFile)

View File

@@ -1,12 +1,9 @@
//go:build windows || darwin
package updatemanager
package updater
import (
"context"
"errors"
"fmt"
"runtime"
"sync"
"time"
@@ -15,7 +12,7 @@ import (
"github.com/netbirdio/netbird/client/internal/peer"
"github.com/netbirdio/netbird/client/internal/statemanager"
"github.com/netbirdio/netbird/client/internal/updatemanager/installer"
"github.com/netbirdio/netbird/client/internal/updater/installer"
cProto "github.com/netbirdio/netbird/client/proto"
"github.com/netbirdio/netbird/version"
)
@@ -41,6 +38,9 @@ type Manager struct {
statusRecorder *peer.Status
stateManager *statemanager.Manager
downloadOnly bool // true when no enforcement from management; notifies UI to download latest
forceUpdate bool // true when management sets AlwaysUpdate; skips UI interaction and installs directly
lastTrigger time.Time
mgmUpdateChan chan struct{}
updateChannel chan struct{}
@@ -53,37 +53,38 @@ type Manager struct {
expectedVersion *v.Version
updateToLatestVersion bool
// updateMutex protect update and expectedVersion fields
pendingVersion *v.Version
// updateMutex protects update, expectedVersion, updateToLatestVersion,
// downloadOnly, forceUpdate, pendingVersion, and lastTrigger fields
updateMutex sync.Mutex
triggerUpdateFn func(context.Context, string) error
// installMutex and installing guard against concurrent installation attempts
installMutex sync.Mutex
installing bool
// protect to start the service multiple times
mu sync.Mutex
autoUpdateSupported func() bool
}
func NewManager(statusRecorder *peer.Status, stateManager *statemanager.Manager) (*Manager, error) {
if runtime.GOOS == "darwin" {
isBrew := !installer.TypeOfInstaller(context.Background()).Downloadable()
if isBrew {
log.Warnf("auto-update disabled on Home Brew installation")
return nil, fmt.Errorf("auto-update not supported on Home Brew installation yet")
}
}
return newManager(statusRecorder, stateManager)
}
func newManager(statusRecorder *peer.Status, stateManager *statemanager.Manager) (*Manager, error) {
// NewManager creates a new update manager. The manager is single-use: once Stop() is called, it cannot be restarted.
func NewManager(statusRecorder *peer.Status, stateManager *statemanager.Manager) *Manager {
manager := &Manager{
statusRecorder: statusRecorder,
stateManager: stateManager,
mgmUpdateChan: make(chan struct{}, 1),
updateChannel: make(chan struct{}, 1),
currentVersion: version.NetbirdVersion(),
update: version.NewUpdate("nb/client"),
statusRecorder: statusRecorder,
stateManager: stateManager,
mgmUpdateChan: make(chan struct{}, 1),
updateChannel: make(chan struct{}, 1),
currentVersion: version.NetbirdVersion(),
update: version.NewUpdate("nb/client"),
downloadOnly: true,
autoUpdateSupported: isAutoUpdateSupported,
}
manager.triggerUpdateFn = manager.triggerUpdate
stateManager.RegisterState(&UpdateState{})
return manager, nil
return manager
}
// CheckUpdateSuccess checks if the update was successful and send a notification.
@@ -124,8 +125,10 @@ func (m *Manager) CheckUpdateSuccess(ctx context.Context) {
}
func (m *Manager) Start(ctx context.Context) {
log.Infof("starting update manager")
m.mu.Lock()
defer m.mu.Unlock()
if m.cancel != nil {
log.Errorf("Manager already started")
return
}
@@ -142,13 +145,32 @@ func (m *Manager) Start(ctx context.Context) {
m.cancel = cancel
m.wg.Add(1)
go m.updateLoop(ctx)
go func() {
defer m.wg.Done()
m.updateLoop(ctx)
}()
}
func (m *Manager) SetVersion(expectedVersion string) {
log.Infof("set expected agent version for upgrade: %s", expectedVersion)
if m.cancel == nil {
log.Errorf("manager not started")
func (m *Manager) SetDownloadOnly() {
m.updateMutex.Lock()
m.downloadOnly = true
m.forceUpdate = false
m.expectedVersion = nil
m.updateToLatestVersion = false
m.lastTrigger = time.Time{}
m.updateMutex.Unlock()
select {
case m.mgmUpdateChan <- struct{}{}:
default:
}
}
func (m *Manager) SetVersion(expectedVersion string, forceUpdate bool) {
log.Infof("expected version changed to %s, force update: %t", expectedVersion, forceUpdate)
if !m.autoUpdateSupported() {
log.Warnf("auto-update not supported on this platform")
return
}
@@ -159,6 +181,7 @@ func (m *Manager) SetVersion(expectedVersion string) {
log.Errorf("empty expected version provided")
m.expectedVersion = nil
m.updateToLatestVersion = false
m.downloadOnly = true
return
}
@@ -178,12 +201,97 @@ func (m *Manager) SetVersion(expectedVersion string) {
m.updateToLatestVersion = false
}
m.lastTrigger = time.Time{}
m.downloadOnly = false
m.forceUpdate = forceUpdate
select {
case m.mgmUpdateChan <- struct{}{}:
default:
}
}
// Install triggers the installation of the pending version. It is called when the user clicks the install button in the UI.
func (m *Manager) Install(ctx context.Context) error {
if !m.autoUpdateSupported() {
return fmt.Errorf("auto-update not supported on this platform")
}
m.updateMutex.Lock()
pending := m.pendingVersion
m.updateMutex.Unlock()
if pending == nil {
return fmt.Errorf("no pending version to install")
}
return m.tryInstall(ctx, pending)
}
// tryInstall ensures only one installation runs at a time. Concurrent callers
// receive an error immediately rather than queuing behind a running install.
func (m *Manager) tryInstall(ctx context.Context, targetVersion *v.Version) error {
m.installMutex.Lock()
if m.installing {
m.installMutex.Unlock()
return fmt.Errorf("installation already in progress")
}
m.installing = true
m.installMutex.Unlock()
defer func() {
m.installMutex.Lock()
m.installing = false
m.installMutex.Unlock()
}()
return m.install(ctx, targetVersion)
}
// NotifyUI re-publishes the current update state to a newly connected UI client.
// Only needed for download-only mode where the latest version is already cached
// NotifyUI re-publishes the current update state so a newly connected UI gets the info.
func (m *Manager) NotifyUI() {
m.updateMutex.Lock()
if m.update == nil {
m.updateMutex.Unlock()
return
}
downloadOnly := m.downloadOnly
pendingVersion := m.pendingVersion
latestVersion := m.update.LatestVersion()
m.updateMutex.Unlock()
if downloadOnly {
if latestVersion == nil {
return
}
currentVersion, err := v.NewVersion(m.currentVersion)
if err != nil || currentVersion.GreaterThanOrEqual(latestVersion) {
return
}
m.statusRecorder.PublishEvent(
cProto.SystemEvent_INFO,
cProto.SystemEvent_SYSTEM,
"New version available",
"",
map[string]string{"new_version_available": latestVersion.String()},
)
return
}
if pendingVersion != nil {
m.statusRecorder.PublishEvent(
cProto.SystemEvent_INFO,
cProto.SystemEvent_SYSTEM,
"New version available",
"",
map[string]string{"new_version_available": pendingVersion.String(), "enforced": "true"},
)
}
}
// Stop is not used at the moment because it fully depends on the daemon. In a future refactor it may make sense to use it.
func (m *Manager) Stop() {
if m.cancel == nil {
return
@@ -214,8 +322,6 @@ func (m *Manager) onContextCancel() {
}
func (m *Manager) updateLoop(ctx context.Context) {
defer m.wg.Done()
for {
select {
case <-ctx.Done():
@@ -239,55 +345,89 @@ func (m *Manager) handleUpdate(ctx context.Context) {
return
}
expectedVersion := m.expectedVersion
useLatest := m.updateToLatestVersion
downloadOnly := m.downloadOnly
forceUpdate := m.forceUpdate
curLatestVersion := m.update.LatestVersion()
m.updateMutex.Unlock()
switch {
// Resolve "latest" to actual version
case useLatest:
// Download-only mode or resolve "latest" to actual version
case downloadOnly, m.updateToLatestVersion:
if curLatestVersion == nil {
log.Tracef("latest version not fetched yet")
m.updateMutex.Unlock()
return
}
updateVersion = curLatestVersion
// Update to specific version
case expectedVersion != nil:
updateVersion = expectedVersion
// Install to specific version
case m.expectedVersion != nil:
updateVersion = m.expectedVersion
default:
log.Debugf("no expected version information set")
m.updateMutex.Unlock()
return
}
log.Debugf("checking update option, current version: %s, target version: %s", m.currentVersion, updateVersion)
if !m.shouldUpdate(updateVersion) {
if !m.shouldUpdate(updateVersion, forceUpdate) {
m.updateMutex.Unlock()
return
}
m.lastTrigger = time.Now()
log.Infof("Auto-update triggered, current version: %s, target version: %s", m.currentVersion, updateVersion)
m.statusRecorder.PublishEvent(
cProto.SystemEvent_CRITICAL,
cProto.SystemEvent_SYSTEM,
"Automatically updating client",
"Your client version is older than auto-update version set in Management, updating client now.",
nil,
)
log.Infof("new version available: %s", updateVersion)
if !downloadOnly && !forceUpdate {
m.pendingVersion = updateVersion
}
m.updateMutex.Unlock()
if downloadOnly {
m.statusRecorder.PublishEvent(
cProto.SystemEvent_INFO,
cProto.SystemEvent_SYSTEM,
"New version available",
"",
map[string]string{"new_version_available": updateVersion.String()},
)
return
}
if forceUpdate {
if err := m.tryInstall(ctx, updateVersion); err != nil {
log.Errorf("force update failed: %v", err)
}
return
}
m.statusRecorder.PublishEvent(
cProto.SystemEvent_INFO,
cProto.SystemEvent_SYSTEM,
"New version available",
"",
map[string]string{"new_version_available": updateVersion.String(), "enforced": "true"},
)
}
func (m *Manager) install(ctx context.Context, pendingVersion *v.Version) error {
m.statusRecorder.PublishEvent(
cProto.SystemEvent_CRITICAL,
cProto.SystemEvent_SYSTEM,
"Updating client",
"Installing update now.",
nil,
)
m.statusRecorder.PublishEvent(
cProto.SystemEvent_CRITICAL,
cProto.SystemEvent_SYSTEM,
"",
"",
map[string]string{"progress_window": "show", "version": updateVersion.String()},
map[string]string{"progress_window": "show", "version": pendingVersion.String()},
)
updateState := UpdateState{
PreUpdateVersion: m.currentVersion,
TargetVersion: updateVersion.String(),
TargetVersion: pendingVersion.String(),
}
if err := m.stateManager.UpdateState(updateState); err != nil {
log.Warnf("failed to update state: %v", err)
} else {
@@ -296,8 +436,9 @@ func (m *Manager) handleUpdate(ctx context.Context) {
}
}
if err := m.triggerUpdateFn(ctx, updateVersion.String()); err != nil {
log.Errorf("Error triggering auto-update: %v", err)
inst := installer.New()
if err := inst.RunInstallation(ctx, pendingVersion.String()); err != nil {
log.Errorf("error triggering update: %v", err)
m.statusRecorder.PublishEvent(
cProto.SystemEvent_ERROR,
cProto.SystemEvent_SYSTEM,
@@ -305,7 +446,9 @@ func (m *Manager) handleUpdate(ctx context.Context) {
fmt.Sprintf("Auto-update failed: %v", err),
nil,
)
return err
}
return nil
}
// loadAndDeleteUpdateState loads the update state, deletes it from storage, and returns it.
@@ -339,7 +482,7 @@ func (m *Manager) loadAndDeleteUpdateState(ctx context.Context) (*UpdateState, e
return updateState, nil
}
func (m *Manager) shouldUpdate(updateVersion *v.Version) bool {
func (m *Manager) shouldUpdate(updateVersion *v.Version, forceUpdate bool) bool {
if m.currentVersion == developmentVersion {
log.Debugf("skipping auto-update, running development version")
return false
@@ -354,8 +497,8 @@ func (m *Manager) shouldUpdate(updateVersion *v.Version) bool {
return false
}
if time.Since(m.lastTrigger) < 5*time.Minute {
log.Debugf("skipping auto-update, last update was %s ago", time.Since(m.lastTrigger))
if forceUpdate && time.Since(m.lastTrigger) < 3*time.Minute {
log.Infof("skipping auto-update, last update was %s ago", time.Since(m.lastTrigger))
return false
}
@@ -367,8 +510,3 @@ func (m *Manager) lastResultErrReason() string {
result := installer.NewResultHandler(inst.TempDir())
return result.GetErrorResultReason()
}
func (m *Manager) triggerUpdate(ctx context.Context, targetVersion string) error {
inst := installer.New()
return inst.RunInstallation(ctx, targetVersion)
}

View File

@@ -0,0 +1,111 @@
//go:build !windows && !darwin
package updater
import (
"context"
"fmt"
"path"
"testing"
"time"
v "github.com/hashicorp/go-version"
"github.com/netbirdio/netbird/client/internal/peer"
"github.com/netbirdio/netbird/client/internal/statemanager"
)
// On Linux, only Mode 1 (downloadOnly) is supported.
// SetVersion is a no-op because auto-update installation is not supported.
func Test_LatestVersion_Linux(t *testing.T) {
testMatrix := []struct {
name string
daemonVersion string
initialLatestVersion *v.Version
latestVersion *v.Version
shouldUpdateInit bool
shouldUpdateLater bool
}{
{
name: "Should notify again when a newer version arrives even within 5 minutes",
daemonVersion: "1.0.0",
initialLatestVersion: v.Must(v.NewSemver("1.0.1")),
latestVersion: v.Must(v.NewSemver("1.0.2")),
shouldUpdateInit: true,
shouldUpdateLater: true,
},
{
name: "Shouldn't notify initially, but should notify as soon as latest version is fetched",
daemonVersion: "1.0.0",
initialLatestVersion: nil,
latestVersion: v.Must(v.NewSemver("1.0.1")),
shouldUpdateInit: false,
shouldUpdateLater: true,
},
}
for idx, c := range testMatrix {
mockUpdate := &versionUpdateMock{latestVersion: c.initialLatestVersion}
tmpFile := path.Join(t.TempDir(), fmt.Sprintf("update-test-%d.json", idx))
recorder := peer.NewRecorder("")
sub := recorder.SubscribeToEvents()
defer recorder.UnsubscribeFromEvents(sub)
m := NewManager(recorder, statemanager.New(tmpFile))
m.update = mockUpdate
m.currentVersion = c.daemonVersion
m.Start(context.Background())
m.SetDownloadOnly()
ver, enforced := waitForUpdateEvent(sub, 500*time.Millisecond)
triggeredInit := ver != ""
if enforced {
t.Errorf("%s: Linux Mode 1 must never have enforced metadata", c.name)
}
if triggeredInit != c.shouldUpdateInit {
t.Errorf("%s: Initial notify mismatch, expected %v, got %v", c.name, c.shouldUpdateInit, triggeredInit)
}
if triggeredInit && c.initialLatestVersion != nil && ver != c.initialLatestVersion.String() {
t.Errorf("%s: Initial version mismatch, expected %v, got %v", c.name, c.initialLatestVersion.String(), ver)
}
mockUpdate.latestVersion = c.latestVersion
mockUpdate.onUpdate()
ver, enforced = waitForUpdateEvent(sub, 500*time.Millisecond)
triggeredLater := ver != ""
if enforced {
t.Errorf("%s: Linux Mode 1 must never have enforced metadata", c.name)
}
if triggeredLater != c.shouldUpdateLater {
t.Errorf("%s: Later notify mismatch, expected %v, got %v", c.name, c.shouldUpdateLater, triggeredLater)
}
if triggeredLater && c.latestVersion != nil && ver != c.latestVersion.String() {
t.Errorf("%s: Later version mismatch, expected %v, got %v", c.name, c.latestVersion.String(), ver)
}
m.Stop()
}
}
func Test_SetVersion_NoOp_Linux(t *testing.T) {
// On Linux, SetVersion should be a no-op — no events fired
tmpFile := path.Join(t.TempDir(), "update-test-noop.json")
recorder := peer.NewRecorder("")
sub := recorder.SubscribeToEvents()
defer recorder.UnsubscribeFromEvents(sub)
m := NewManager(recorder, statemanager.New(tmpFile))
m.update = &versionUpdateMock{latestVersion: v.Must(v.NewSemver("1.0.1"))}
m.currentVersion = "1.0.0"
m.Start(context.Background())
m.SetVersion("1.0.1", false)
ver, _ := waitForUpdateEvent(sub, 500*time.Millisecond)
if ver != "" {
t.Errorf("SetVersion should be a no-op on Linux, but got event with version %s", ver)
}
m.Stop()
}

View File

@@ -0,0 +1,227 @@
//go:build windows || darwin
package updater
import (
"context"
"fmt"
"path"
"testing"
"time"
v "github.com/hashicorp/go-version"
"github.com/netbirdio/netbird/client/internal/peer"
"github.com/netbirdio/netbird/client/internal/statemanager"
cProto "github.com/netbirdio/netbird/client/proto"
)
func Test_LatestVersion(t *testing.T) {
testMatrix := []struct {
name string
daemonVersion string
initialLatestVersion *v.Version
latestVersion *v.Version
shouldUpdateInit bool
shouldUpdateLater bool
}{
{
name: "Should notify again when a newer version arrives even within 5 minutes",
daemonVersion: "1.0.0",
initialLatestVersion: v.Must(v.NewSemver("1.0.1")),
latestVersion: v.Must(v.NewSemver("1.0.2")),
shouldUpdateInit: true,
shouldUpdateLater: true,
},
{
name: "Shouldn't update initially, but should update as soon as latest version is fetched",
daemonVersion: "1.0.0",
initialLatestVersion: nil,
latestVersion: v.Must(v.NewSemver("1.0.1")),
shouldUpdateInit: false,
shouldUpdateLater: true,
},
}
for idx, c := range testMatrix {
mockUpdate := &versionUpdateMock{latestVersion: c.initialLatestVersion}
tmpFile := path.Join(t.TempDir(), fmt.Sprintf("update-test-%d.json", idx))
recorder := peer.NewRecorder("")
sub := recorder.SubscribeToEvents()
defer recorder.UnsubscribeFromEvents(sub)
m := NewManager(recorder, statemanager.New(tmpFile))
m.update = mockUpdate
m.currentVersion = c.daemonVersion
m.autoUpdateSupported = func() bool { return true }
m.Start(context.Background())
m.SetVersion("latest", false)
ver, _ := waitForUpdateEvent(sub, 500*time.Millisecond)
triggeredInit := ver != ""
if triggeredInit != c.shouldUpdateInit {
t.Errorf("%s: Initial update trigger mismatch, expected %v, got %v", c.name, c.shouldUpdateInit, triggeredInit)
}
if triggeredInit && c.initialLatestVersion != nil && ver != c.initialLatestVersion.String() {
t.Errorf("%s: Initial update version mismatch, expected %v, got %v", c.name, c.initialLatestVersion.String(), ver)
}
mockUpdate.latestVersion = c.latestVersion
mockUpdate.onUpdate()
ver, _ = waitForUpdateEvent(sub, 500*time.Millisecond)
triggeredLater := ver != ""
if triggeredLater != c.shouldUpdateLater {
t.Errorf("%s: Later update trigger mismatch, expected %v, got %v", c.name, c.shouldUpdateLater, triggeredLater)
}
if triggeredLater && c.latestVersion != nil && ver != c.latestVersion.String() {
t.Errorf("%s: Later update version mismatch, expected %v, got %v", c.name, c.latestVersion.String(), ver)
}
m.Stop()
}
}
func Test_HandleUpdate(t *testing.T) {
testMatrix := []struct {
name string
daemonVersion string
latestVersion *v.Version
expectedVersion string
shouldUpdate bool
}{
{
name: "Install to a specific version should update regardless of if latestVersion is available yet",
daemonVersion: "0.55.0",
latestVersion: nil,
expectedVersion: "0.56.0",
shouldUpdate: true,
},
{
name: "Install to specific version should not update if version matches",
daemonVersion: "0.55.0",
latestVersion: nil,
expectedVersion: "0.55.0",
shouldUpdate: false,
},
{
name: "Install to specific version should not update if current version is newer",
daemonVersion: "0.55.0",
latestVersion: nil,
expectedVersion: "0.54.0",
shouldUpdate: false,
},
{
name: "Install to latest version should update if latest is newer",
daemonVersion: "0.55.0",
latestVersion: v.Must(v.NewSemver("0.56.0")),
expectedVersion: "latest",
shouldUpdate: true,
},
{
name: "Install to latest version should not update if latest == current",
daemonVersion: "0.56.0",
latestVersion: v.Must(v.NewSemver("0.56.0")),
expectedVersion: "latest",
shouldUpdate: false,
},
{
name: "Should not update if daemon version is invalid",
daemonVersion: "development",
latestVersion: v.Must(v.NewSemver("1.0.0")),
expectedVersion: "latest",
shouldUpdate: false,
},
{
name: "Should not update if expecting latest and latest version is unavailable",
daemonVersion: "0.55.0",
latestVersion: nil,
expectedVersion: "latest",
shouldUpdate: false,
},
{
name: "Should not update if expected version is invalid",
daemonVersion: "0.55.0",
latestVersion: nil,
expectedVersion: "development",
shouldUpdate: false,
},
}
for idx, c := range testMatrix {
tmpFile := path.Join(t.TempDir(), fmt.Sprintf("update-test-%d.json", idx))
recorder := peer.NewRecorder("")
sub := recorder.SubscribeToEvents()
defer recorder.UnsubscribeFromEvents(sub)
m := NewManager(recorder, statemanager.New(tmpFile))
m.update = &versionUpdateMock{latestVersion: c.latestVersion}
m.currentVersion = c.daemonVersion
m.autoUpdateSupported = func() bool { return true }
m.Start(context.Background())
m.SetVersion(c.expectedVersion, false)
ver, _ := waitForUpdateEvent(sub, 500*time.Millisecond)
updateTriggered := ver != ""
if updateTriggered {
if c.expectedVersion == "latest" && c.latestVersion != nil && ver != c.latestVersion.String() {
t.Errorf("%s: Version mismatch, expected %v, got %v", c.name, c.latestVersion.String(), ver)
} else if c.expectedVersion != "latest" && c.expectedVersion != "development" && ver != c.expectedVersion {
t.Errorf("%s: Version mismatch, expected %v, got %v", c.name, c.expectedVersion, ver)
}
}
if updateTriggered != c.shouldUpdate {
t.Errorf("%s: Update trigger mismatch, expected %v, got %v", c.name, c.shouldUpdate, updateTriggered)
}
m.Stop()
}
}
// Test_EnforcedMetadata verifies the "enforced" metadata flag on
// new_version_available events: download-only mode must not mark the update
// as enforced, while an explicitly pinned version (forceUpdate=false) must.
func Test_EnforcedMetadata(t *testing.T) {
	// Mode 1 (downloadOnly): no enforced metadata
	statePath := path.Join(t.TempDir(), "update-test-mode1.json")
	rec := peer.NewRecorder("")
	events := rec.SubscribeToEvents()
	defer rec.UnsubscribeFromEvents(events)
	mgr := NewManager(rec, statemanager.New(statePath))
	mgr.update = &versionUpdateMock{latestVersion: v.Must(v.NewSemver("1.0.1"))}
	mgr.currentVersion = "1.0.0"
	mgr.Start(context.Background())
	mgr.SetDownloadOnly()
	gotVer, gotEnforced := waitForUpdateEvent(events, 500*time.Millisecond)
	if gotVer == "" {
		t.Fatal("Mode 1: expected new_version_available event")
	}
	if gotEnforced {
		t.Error("Mode 1: expected no enforced metadata")
	}
	mgr.Stop()

	// Mode 2 (enforced, forceUpdate=false): enforced metadata present, no auto-install
	statePath2 := path.Join(t.TempDir(), "update-test-mode2.json")
	rec2 := peer.NewRecorder("")
	events2 := rec2.SubscribeToEvents()
	defer rec2.UnsubscribeFromEvents(events2)
	mgr2 := NewManager(rec2, statemanager.New(statePath2))
	mgr2.update = &versionUpdateMock{latestVersion: nil}
	mgr2.currentVersion = "1.0.0"
	mgr2.autoUpdateSupported = func() bool { return true }
	mgr2.Start(context.Background())
	mgr2.SetVersion("1.0.1", false)
	gotVer2, gotEnforced2 := waitForUpdateEvent(events2, 500*time.Millisecond)
	if gotVer2 == "" {
		t.Fatal("Mode 2: expected new_version_available event")
	}
	if !gotEnforced2 {
		t.Error("Mode 2: expected enforced metadata")
	}
	mgr2.Stop()
}
// ensure the proto import is used
// NOTE(review): referencing one symbol keeps the cProto import compiling even
// when no test here uses it directly — drop once the import is used elsewhere.
var _ = cProto.SystemEvent_INFO

View File

@@ -0,0 +1,56 @@
package updater
import (
"strconv"
"time"
v "github.com/hashicorp/go-version"
"github.com/netbirdio/netbird/client/internal/peer"
)
// versionUpdateMock is a test double for the Manager's version-update
// dependency. It serves a canned latest version and records the registered
// update listener so tests can drive update notifications manually.
//
// All methods use pointer receivers for consistency: SetOnUpdateListener must
// mutate the mock, and the tests always construct it as &versionUpdateMock{...},
// so the pointer method set is what callers rely on.
type versionUpdateMock struct {
	latestVersion *v.Version // value reported by LatestVersion; nil means "unknown/unavailable"
	onUpdate      func()     // listener stored by SetOnUpdateListener
}

// StopWatch is a no-op in the mock.
func (m *versionUpdateMock) StopWatch() {}

// SetDaemonVersion ignores the new version and reports that nothing changed.
func (m *versionUpdateMock) SetDaemonVersion(newVersion string) bool {
	return false
}

// SetOnUpdateListener stores the listener so a test can invoke it directly.
func (m *versionUpdateMock) SetOnUpdateListener(updateFn func()) {
	m.onUpdate = updateFn
}

// LatestVersion returns the canned latest version (may be nil).
func (m *versionUpdateMock) LatestVersion() *v.Version {
	return m.latestVersion
}

// StartFetcher is a no-op in the mock.
func (m *versionUpdateMock) StartFetcher() {}
// waitForUpdateEvent blocks until an event carrying the
// "new_version_available" metadata key arrives on sub, or until timeout
// elapses. It returns the advertised version together with the parsed
// "enforced" metadata flag; on timeout or a closed subscription it returns
// ("", false). A malformed "enforced" value is treated as false.
func waitForUpdateEvent(sub *peer.EventSubscription, timeout time.Duration) (version string, enforced bool) {
	deadline := time.NewTimer(timeout)
	defer deadline.Stop()
	for {
		select {
		case <-deadline.C:
			return "", false
		case ev, open := <-sub.Events():
			if !open {
				return "", false
			}
			newVer, found := ev.Metadata["new_version_available"]
			if !found {
				continue
			}
			isEnforced := false
			if raw, has := ev.Metadata["enforced"]; has {
				if parsed, err := strconv.ParseBool(raw); err == nil {
					isEnforced = parsed
				}
			}
			return newVer, isEnforced
		}
	}
}

View File

@@ -10,7 +10,7 @@ import (
log "github.com/sirupsen/logrus"
"github.com/netbirdio/netbird/client/internal/updatemanager/downloader"
"github.com/netbirdio/netbird/client/internal/updater/downloader"
)
const (

View File

@@ -0,0 +1,22 @@
package updater
import (
"context"
"time"
log "github.com/sirupsen/logrus"
"github.com/netbirdio/netbird/client/internal/updater/installer"
)
// isAutoUpdateSupported reports whether this installation can be
// auto-updated. Installations whose installer type is not downloadable
// (Homebrew, per the warning below) are excluded, since the package manager
// owns the binary.
func isAutoUpdateSupported() bool {
	// Bound the installer-type probe so a hung check cannot block startup.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	if installer.TypeOfInstaller(ctx).Downloadable() {
		return true
	}
	log.Warnf("auto-update disabled on Homebrew installation")
	return false
}

View File

@@ -0,0 +1,7 @@
//go:build !windows && !darwin
package updater
// isAutoUpdateSupported reports whether the daemon can auto-update itself.
// This file builds only for !windows && !darwin (see the build constraint
// above), where auto-update is disabled — presumably because such installs
// are managed by a system package manager; confirm against the installer docs.
func isAutoUpdateSupported() bool {
	return false
}

View File

@@ -0,0 +1,5 @@
package updater
// isAutoUpdateSupported reports whether the daemon can auto-update itself.
// Always true in this platform variant (build constraint not visible here —
// TODO(review): confirm which platforms this file targets).
func isAutoUpdateSupported() bool {
	return true
}

View File

@@ -1,4 +1,4 @@
package updatemanager
package updater
import v "github.com/hashicorp/go-version"

View File

@@ -160,7 +160,7 @@ func (c *Client) Run(fd int32, interfaceName string, envList *EnvList) error {
c.onHostDnsFn = func([]string) {}
cfg.WgIface = interfaceName
c.connectClient = internal.NewConnectClient(ctx, cfg, c.recorder, false)
c.connectClient = internal.NewConnectClient(ctx, cfg, c.recorder)
return c.connectClient.RunOniOS(fd, c.networkChangeListener, c.dnsManager, c.stateFile)
}

View File

@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.6
// protoc v6.33.3
// protoc v6.33.1
// source: daemon.proto
package proto
@@ -95,6 +95,7 @@ const (
ExposeProtocol_EXPOSE_HTTPS ExposeProtocol = 1
ExposeProtocol_EXPOSE_TCP ExposeProtocol = 2
ExposeProtocol_EXPOSE_UDP ExposeProtocol = 3
ExposeProtocol_EXPOSE_TLS ExposeProtocol = 4
)
// Enum value maps for ExposeProtocol.
@@ -104,12 +105,14 @@ var (
1: "EXPOSE_HTTPS",
2: "EXPOSE_TCP",
3: "EXPOSE_UDP",
4: "EXPOSE_TLS",
}
ExposeProtocol_value = map[string]int32{
"EXPOSE_HTTP": 0,
"EXPOSE_HTTPS": 1,
"EXPOSE_TCP": 2,
"EXPOSE_UDP": 3,
"EXPOSE_TLS": 4,
}
)
@@ -945,7 +948,6 @@ type UpRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
ProfileName *string `protobuf:"bytes,1,opt,name=profileName,proto3,oneof" json:"profileName,omitempty"`
Username *string `protobuf:"bytes,2,opt,name=username,proto3,oneof" json:"username,omitempty"`
AutoUpdate *bool `protobuf:"varint,3,opt,name=autoUpdate,proto3,oneof" json:"autoUpdate,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
@@ -994,13 +996,6 @@ func (x *UpRequest) GetUsername() string {
return ""
}
func (x *UpRequest) GetAutoUpdate() bool {
if x != nil && x.AutoUpdate != nil {
return *x.AutoUpdate
}
return false
}
type UpResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
@@ -5032,6 +5027,94 @@ func (x *GetFeaturesResponse) GetDisableUpdateSettings() bool {
return false
}
type TriggerUpdateRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TriggerUpdateRequest) Reset() {
*x = TriggerUpdateRequest{}
mi := &file_daemon_proto_msgTypes[73]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TriggerUpdateRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TriggerUpdateRequest) ProtoMessage() {}
func (x *TriggerUpdateRequest) ProtoReflect() protoreflect.Message {
mi := &file_daemon_proto_msgTypes[73]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TriggerUpdateRequest.ProtoReflect.Descriptor instead.
func (*TriggerUpdateRequest) Descriptor() ([]byte, []int) {
return file_daemon_proto_rawDescGZIP(), []int{73}
}
type TriggerUpdateResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"`
ErrorMsg string `protobuf:"bytes,2,opt,name=errorMsg,proto3" json:"errorMsg,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *TriggerUpdateResponse) Reset() {
*x = TriggerUpdateResponse{}
mi := &file_daemon_proto_msgTypes[74]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *TriggerUpdateResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TriggerUpdateResponse) ProtoMessage() {}
func (x *TriggerUpdateResponse) ProtoReflect() protoreflect.Message {
mi := &file_daemon_proto_msgTypes[74]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TriggerUpdateResponse.ProtoReflect.Descriptor instead.
func (*TriggerUpdateResponse) Descriptor() ([]byte, []int) {
return file_daemon_proto_rawDescGZIP(), []int{74}
}
func (x *TriggerUpdateResponse) GetSuccess() bool {
if x != nil {
return x.Success
}
return false
}
func (x *TriggerUpdateResponse) GetErrorMsg() string {
if x != nil {
return x.ErrorMsg
}
return ""
}
// GetPeerSSHHostKeyRequest for retrieving SSH host key for a specific peer
type GetPeerSSHHostKeyRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
@@ -5043,7 +5126,7 @@ type GetPeerSSHHostKeyRequest struct {
func (x *GetPeerSSHHostKeyRequest) Reset() {
*x = GetPeerSSHHostKeyRequest{}
mi := &file_daemon_proto_msgTypes[73]
mi := &file_daemon_proto_msgTypes[75]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5055,7 +5138,7 @@ func (x *GetPeerSSHHostKeyRequest) String() string {
func (*GetPeerSSHHostKeyRequest) ProtoMessage() {}
func (x *GetPeerSSHHostKeyRequest) ProtoReflect() protoreflect.Message {
mi := &file_daemon_proto_msgTypes[73]
mi := &file_daemon_proto_msgTypes[75]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5068,7 +5151,7 @@ func (x *GetPeerSSHHostKeyRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetPeerSSHHostKeyRequest.ProtoReflect.Descriptor instead.
func (*GetPeerSSHHostKeyRequest) Descriptor() ([]byte, []int) {
return file_daemon_proto_rawDescGZIP(), []int{73}
return file_daemon_proto_rawDescGZIP(), []int{75}
}
func (x *GetPeerSSHHostKeyRequest) GetPeerAddress() string {
@@ -5095,7 +5178,7 @@ type GetPeerSSHHostKeyResponse struct {
func (x *GetPeerSSHHostKeyResponse) Reset() {
*x = GetPeerSSHHostKeyResponse{}
mi := &file_daemon_proto_msgTypes[74]
mi := &file_daemon_proto_msgTypes[76]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5107,7 +5190,7 @@ func (x *GetPeerSSHHostKeyResponse) String() string {
func (*GetPeerSSHHostKeyResponse) ProtoMessage() {}
func (x *GetPeerSSHHostKeyResponse) ProtoReflect() protoreflect.Message {
mi := &file_daemon_proto_msgTypes[74]
mi := &file_daemon_proto_msgTypes[76]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5120,7 +5203,7 @@ func (x *GetPeerSSHHostKeyResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetPeerSSHHostKeyResponse.ProtoReflect.Descriptor instead.
func (*GetPeerSSHHostKeyResponse) Descriptor() ([]byte, []int) {
return file_daemon_proto_rawDescGZIP(), []int{74}
return file_daemon_proto_rawDescGZIP(), []int{76}
}
func (x *GetPeerSSHHostKeyResponse) GetSshHostKey() []byte {
@@ -5162,7 +5245,7 @@ type RequestJWTAuthRequest struct {
func (x *RequestJWTAuthRequest) Reset() {
*x = RequestJWTAuthRequest{}
mi := &file_daemon_proto_msgTypes[75]
mi := &file_daemon_proto_msgTypes[77]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5174,7 +5257,7 @@ func (x *RequestJWTAuthRequest) String() string {
func (*RequestJWTAuthRequest) ProtoMessage() {}
func (x *RequestJWTAuthRequest) ProtoReflect() protoreflect.Message {
mi := &file_daemon_proto_msgTypes[75]
mi := &file_daemon_proto_msgTypes[77]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5187,7 +5270,7 @@ func (x *RequestJWTAuthRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use RequestJWTAuthRequest.ProtoReflect.Descriptor instead.
func (*RequestJWTAuthRequest) Descriptor() ([]byte, []int) {
return file_daemon_proto_rawDescGZIP(), []int{75}
return file_daemon_proto_rawDescGZIP(), []int{77}
}
func (x *RequestJWTAuthRequest) GetHint() string {
@@ -5220,7 +5303,7 @@ type RequestJWTAuthResponse struct {
func (x *RequestJWTAuthResponse) Reset() {
*x = RequestJWTAuthResponse{}
mi := &file_daemon_proto_msgTypes[76]
mi := &file_daemon_proto_msgTypes[78]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5232,7 +5315,7 @@ func (x *RequestJWTAuthResponse) String() string {
func (*RequestJWTAuthResponse) ProtoMessage() {}
func (x *RequestJWTAuthResponse) ProtoReflect() protoreflect.Message {
mi := &file_daemon_proto_msgTypes[76]
mi := &file_daemon_proto_msgTypes[78]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5245,7 +5328,7 @@ func (x *RequestJWTAuthResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use RequestJWTAuthResponse.ProtoReflect.Descriptor instead.
func (*RequestJWTAuthResponse) Descriptor() ([]byte, []int) {
return file_daemon_proto_rawDescGZIP(), []int{76}
return file_daemon_proto_rawDescGZIP(), []int{78}
}
func (x *RequestJWTAuthResponse) GetVerificationURI() string {
@@ -5310,7 +5393,7 @@ type WaitJWTTokenRequest struct {
func (x *WaitJWTTokenRequest) Reset() {
*x = WaitJWTTokenRequest{}
mi := &file_daemon_proto_msgTypes[77]
mi := &file_daemon_proto_msgTypes[79]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5322,7 +5405,7 @@ func (x *WaitJWTTokenRequest) String() string {
func (*WaitJWTTokenRequest) ProtoMessage() {}
func (x *WaitJWTTokenRequest) ProtoReflect() protoreflect.Message {
mi := &file_daemon_proto_msgTypes[77]
mi := &file_daemon_proto_msgTypes[79]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5335,7 +5418,7 @@ func (x *WaitJWTTokenRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use WaitJWTTokenRequest.ProtoReflect.Descriptor instead.
func (*WaitJWTTokenRequest) Descriptor() ([]byte, []int) {
return file_daemon_proto_rawDescGZIP(), []int{77}
return file_daemon_proto_rawDescGZIP(), []int{79}
}
func (x *WaitJWTTokenRequest) GetDeviceCode() string {
@@ -5367,7 +5450,7 @@ type WaitJWTTokenResponse struct {
func (x *WaitJWTTokenResponse) Reset() {
*x = WaitJWTTokenResponse{}
mi := &file_daemon_proto_msgTypes[78]
mi := &file_daemon_proto_msgTypes[80]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5379,7 +5462,7 @@ func (x *WaitJWTTokenResponse) String() string {
func (*WaitJWTTokenResponse) ProtoMessage() {}
func (x *WaitJWTTokenResponse) ProtoReflect() protoreflect.Message {
mi := &file_daemon_proto_msgTypes[78]
mi := &file_daemon_proto_msgTypes[80]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5392,7 +5475,7 @@ func (x *WaitJWTTokenResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use WaitJWTTokenResponse.ProtoReflect.Descriptor instead.
func (*WaitJWTTokenResponse) Descriptor() ([]byte, []int) {
return file_daemon_proto_rawDescGZIP(), []int{78}
return file_daemon_proto_rawDescGZIP(), []int{80}
}
func (x *WaitJWTTokenResponse) GetToken() string {
@@ -5425,7 +5508,7 @@ type StartCPUProfileRequest struct {
func (x *StartCPUProfileRequest) Reset() {
*x = StartCPUProfileRequest{}
mi := &file_daemon_proto_msgTypes[79]
mi := &file_daemon_proto_msgTypes[81]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5437,7 +5520,7 @@ func (x *StartCPUProfileRequest) String() string {
func (*StartCPUProfileRequest) ProtoMessage() {}
func (x *StartCPUProfileRequest) ProtoReflect() protoreflect.Message {
mi := &file_daemon_proto_msgTypes[79]
mi := &file_daemon_proto_msgTypes[81]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5450,7 +5533,7 @@ func (x *StartCPUProfileRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use StartCPUProfileRequest.ProtoReflect.Descriptor instead.
func (*StartCPUProfileRequest) Descriptor() ([]byte, []int) {
return file_daemon_proto_rawDescGZIP(), []int{79}
return file_daemon_proto_rawDescGZIP(), []int{81}
}
// StartCPUProfileResponse confirms CPU profiling has started
@@ -5462,7 +5545,7 @@ type StartCPUProfileResponse struct {
func (x *StartCPUProfileResponse) Reset() {
*x = StartCPUProfileResponse{}
mi := &file_daemon_proto_msgTypes[80]
mi := &file_daemon_proto_msgTypes[82]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5474,7 +5557,7 @@ func (x *StartCPUProfileResponse) String() string {
func (*StartCPUProfileResponse) ProtoMessage() {}
func (x *StartCPUProfileResponse) ProtoReflect() protoreflect.Message {
mi := &file_daemon_proto_msgTypes[80]
mi := &file_daemon_proto_msgTypes[82]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5487,7 +5570,7 @@ func (x *StartCPUProfileResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use StartCPUProfileResponse.ProtoReflect.Descriptor instead.
func (*StartCPUProfileResponse) Descriptor() ([]byte, []int) {
return file_daemon_proto_rawDescGZIP(), []int{80}
return file_daemon_proto_rawDescGZIP(), []int{82}
}
// StopCPUProfileRequest for stopping CPU profiling
@@ -5499,7 +5582,7 @@ type StopCPUProfileRequest struct {
func (x *StopCPUProfileRequest) Reset() {
*x = StopCPUProfileRequest{}
mi := &file_daemon_proto_msgTypes[81]
mi := &file_daemon_proto_msgTypes[83]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5511,7 +5594,7 @@ func (x *StopCPUProfileRequest) String() string {
func (*StopCPUProfileRequest) ProtoMessage() {}
func (x *StopCPUProfileRequest) ProtoReflect() protoreflect.Message {
mi := &file_daemon_proto_msgTypes[81]
mi := &file_daemon_proto_msgTypes[83]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5524,7 +5607,7 @@ func (x *StopCPUProfileRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use StopCPUProfileRequest.ProtoReflect.Descriptor instead.
func (*StopCPUProfileRequest) Descriptor() ([]byte, []int) {
return file_daemon_proto_rawDescGZIP(), []int{81}
return file_daemon_proto_rawDescGZIP(), []int{83}
}
// StopCPUProfileResponse confirms CPU profiling has stopped
@@ -5536,7 +5619,7 @@ type StopCPUProfileResponse struct {
func (x *StopCPUProfileResponse) Reset() {
*x = StopCPUProfileResponse{}
mi := &file_daemon_proto_msgTypes[82]
mi := &file_daemon_proto_msgTypes[84]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5548,7 +5631,7 @@ func (x *StopCPUProfileResponse) String() string {
func (*StopCPUProfileResponse) ProtoMessage() {}
func (x *StopCPUProfileResponse) ProtoReflect() protoreflect.Message {
mi := &file_daemon_proto_msgTypes[82]
mi := &file_daemon_proto_msgTypes[84]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5561,7 +5644,7 @@ func (x *StopCPUProfileResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use StopCPUProfileResponse.ProtoReflect.Descriptor instead.
func (*StopCPUProfileResponse) Descriptor() ([]byte, []int) {
return file_daemon_proto_rawDescGZIP(), []int{82}
return file_daemon_proto_rawDescGZIP(), []int{84}
}
type InstallerResultRequest struct {
@@ -5572,7 +5655,7 @@ type InstallerResultRequest struct {
func (x *InstallerResultRequest) Reset() {
*x = InstallerResultRequest{}
mi := &file_daemon_proto_msgTypes[83]
mi := &file_daemon_proto_msgTypes[85]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5584,7 +5667,7 @@ func (x *InstallerResultRequest) String() string {
func (*InstallerResultRequest) ProtoMessage() {}
func (x *InstallerResultRequest) ProtoReflect() protoreflect.Message {
mi := &file_daemon_proto_msgTypes[83]
mi := &file_daemon_proto_msgTypes[85]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5597,7 +5680,7 @@ func (x *InstallerResultRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use InstallerResultRequest.ProtoReflect.Descriptor instead.
func (*InstallerResultRequest) Descriptor() ([]byte, []int) {
return file_daemon_proto_rawDescGZIP(), []int{83}
return file_daemon_proto_rawDescGZIP(), []int{85}
}
type InstallerResultResponse struct {
@@ -5610,7 +5693,7 @@ type InstallerResultResponse struct {
func (x *InstallerResultResponse) Reset() {
*x = InstallerResultResponse{}
mi := &file_daemon_proto_msgTypes[84]
mi := &file_daemon_proto_msgTypes[86]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5622,7 +5705,7 @@ func (x *InstallerResultResponse) String() string {
func (*InstallerResultResponse) ProtoMessage() {}
func (x *InstallerResultResponse) ProtoReflect() protoreflect.Message {
mi := &file_daemon_proto_msgTypes[84]
mi := &file_daemon_proto_msgTypes[86]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5635,7 +5718,7 @@ func (x *InstallerResultResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use InstallerResultResponse.ProtoReflect.Descriptor instead.
func (*InstallerResultResponse) Descriptor() ([]byte, []int) {
return file_daemon_proto_rawDescGZIP(), []int{84}
return file_daemon_proto_rawDescGZIP(), []int{86}
}
func (x *InstallerResultResponse) GetSuccess() bool {
@@ -5661,13 +5744,14 @@ type ExposeServiceRequest struct {
UserGroups []string `protobuf:"bytes,5,rep,name=user_groups,json=userGroups,proto3" json:"user_groups,omitempty"`
Domain string `protobuf:"bytes,6,opt,name=domain,proto3" json:"domain,omitempty"`
NamePrefix string `protobuf:"bytes,7,opt,name=name_prefix,json=namePrefix,proto3" json:"name_prefix,omitempty"`
ListenPort uint32 `protobuf:"varint,8,opt,name=listen_port,json=listenPort,proto3" json:"listen_port,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ExposeServiceRequest) Reset() {
*x = ExposeServiceRequest{}
mi := &file_daemon_proto_msgTypes[85]
mi := &file_daemon_proto_msgTypes[87]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5679,7 +5763,7 @@ func (x *ExposeServiceRequest) String() string {
func (*ExposeServiceRequest) ProtoMessage() {}
func (x *ExposeServiceRequest) ProtoReflect() protoreflect.Message {
mi := &file_daemon_proto_msgTypes[85]
mi := &file_daemon_proto_msgTypes[87]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5692,7 +5776,7 @@ func (x *ExposeServiceRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ExposeServiceRequest.ProtoReflect.Descriptor instead.
func (*ExposeServiceRequest) Descriptor() ([]byte, []int) {
return file_daemon_proto_rawDescGZIP(), []int{85}
return file_daemon_proto_rawDescGZIP(), []int{87}
}
func (x *ExposeServiceRequest) GetPort() uint32 {
@@ -5744,6 +5828,13 @@ func (x *ExposeServiceRequest) GetNamePrefix() string {
return ""
}
func (x *ExposeServiceRequest) GetListenPort() uint32 {
if x != nil {
return x.ListenPort
}
return 0
}
type ExposeServiceEvent struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Types that are valid to be assigned to Event:
@@ -5756,7 +5847,7 @@ type ExposeServiceEvent struct {
func (x *ExposeServiceEvent) Reset() {
*x = ExposeServiceEvent{}
mi := &file_daemon_proto_msgTypes[86]
mi := &file_daemon_proto_msgTypes[88]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5768,7 +5859,7 @@ func (x *ExposeServiceEvent) String() string {
func (*ExposeServiceEvent) ProtoMessage() {}
func (x *ExposeServiceEvent) ProtoReflect() protoreflect.Message {
mi := &file_daemon_proto_msgTypes[86]
mi := &file_daemon_proto_msgTypes[88]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5781,7 +5872,7 @@ func (x *ExposeServiceEvent) ProtoReflect() protoreflect.Message {
// Deprecated: Use ExposeServiceEvent.ProtoReflect.Descriptor instead.
func (*ExposeServiceEvent) Descriptor() ([]byte, []int) {
return file_daemon_proto_rawDescGZIP(), []int{86}
return file_daemon_proto_rawDescGZIP(), []int{88}
}
func (x *ExposeServiceEvent) GetEvent() isExposeServiceEvent_Event {
@@ -5811,17 +5902,18 @@ type ExposeServiceEvent_Ready struct {
func (*ExposeServiceEvent_Ready) isExposeServiceEvent_Event() {}
type ExposeServiceReady struct {
state protoimpl.MessageState `protogen:"open.v1"`
ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"`
ServiceUrl string `protobuf:"bytes,2,opt,name=service_url,json=serviceUrl,proto3" json:"service_url,omitempty"`
Domain string `protobuf:"bytes,3,opt,name=domain,proto3" json:"domain,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
state protoimpl.MessageState `protogen:"open.v1"`
ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"`
ServiceUrl string `protobuf:"bytes,2,opt,name=service_url,json=serviceUrl,proto3" json:"service_url,omitempty"`
Domain string `protobuf:"bytes,3,opt,name=domain,proto3" json:"domain,omitempty"`
PortAutoAssigned bool `protobuf:"varint,4,opt,name=port_auto_assigned,json=portAutoAssigned,proto3" json:"port_auto_assigned,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ExposeServiceReady) Reset() {
*x = ExposeServiceReady{}
mi := &file_daemon_proto_msgTypes[87]
mi := &file_daemon_proto_msgTypes[89]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5833,7 +5925,7 @@ func (x *ExposeServiceReady) String() string {
func (*ExposeServiceReady) ProtoMessage() {}
func (x *ExposeServiceReady) ProtoReflect() protoreflect.Message {
mi := &file_daemon_proto_msgTypes[87]
mi := &file_daemon_proto_msgTypes[89]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5846,7 +5938,7 @@ func (x *ExposeServiceReady) ProtoReflect() protoreflect.Message {
// Deprecated: Use ExposeServiceReady.ProtoReflect.Descriptor instead.
func (*ExposeServiceReady) Descriptor() ([]byte, []int) {
return file_daemon_proto_rawDescGZIP(), []int{87}
return file_daemon_proto_rawDescGZIP(), []int{89}
}
func (x *ExposeServiceReady) GetServiceName() string {
@@ -5870,6 +5962,13 @@ func (x *ExposeServiceReady) GetDomain() string {
return ""
}
func (x *ExposeServiceReady) GetPortAutoAssigned() bool {
if x != nil {
return x.PortAutoAssigned
}
return false
}
type PortInfo_Range struct {
state protoimpl.MessageState `protogen:"open.v1"`
Start uint32 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"`
@@ -5880,7 +5979,7 @@ type PortInfo_Range struct {
func (x *PortInfo_Range) Reset() {
*x = PortInfo_Range{}
mi := &file_daemon_proto_msgTypes[89]
mi := &file_daemon_proto_msgTypes[91]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5892,7 +5991,7 @@ func (x *PortInfo_Range) String() string {
func (*PortInfo_Range) ProtoMessage() {}
func (x *PortInfo_Range) ProtoReflect() protoreflect.Message {
mi := &file_daemon_proto_msgTypes[89]
mi := &file_daemon_proto_msgTypes[91]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6016,16 +6115,12 @@ const file_daemon_proto_rawDesc = "" +
"\buserCode\x18\x01 \x01(\tR\buserCode\x12\x1a\n" +
"\bhostname\x18\x02 \x01(\tR\bhostname\",\n" +
"\x14WaitSSOLoginResponse\x12\x14\n" +
"\x05email\x18\x01 \x01(\tR\x05email\"\xa4\x01\n" +
"\x05email\x18\x01 \x01(\tR\x05email\"v\n" +
"\tUpRequest\x12%\n" +
"\vprofileName\x18\x01 \x01(\tH\x00R\vprofileName\x88\x01\x01\x12\x1f\n" +
"\busername\x18\x02 \x01(\tH\x01R\busername\x88\x01\x01\x12#\n" +
"\n" +
"autoUpdate\x18\x03 \x01(\bH\x02R\n" +
"autoUpdate\x88\x01\x01B\x0e\n" +
"\busername\x18\x02 \x01(\tH\x01R\busername\x88\x01\x01B\x0e\n" +
"\f_profileNameB\v\n" +
"\t_usernameB\r\n" +
"\v_autoUpdate\"\f\n" +
"\t_usernameJ\x04\b\x03\x10\x04\"\f\n" +
"\n" +
"UpResponse\"\xa1\x01\n" +
"\rStatusRequest\x12,\n" +
@@ -6380,7 +6475,11 @@ const file_daemon_proto_rawDesc = "" +
"\x12GetFeaturesRequest\"x\n" +
"\x13GetFeaturesResponse\x12)\n" +
"\x10disable_profiles\x18\x01 \x01(\bR\x0fdisableProfiles\x126\n" +
"\x17disable_update_settings\x18\x02 \x01(\bR\x15disableUpdateSettings\"<\n" +
"\x17disable_update_settings\x18\x02 \x01(\bR\x15disableUpdateSettings\"\x16\n" +
"\x14TriggerUpdateRequest\"M\n" +
"\x15TriggerUpdateResponse\x12\x18\n" +
"\asuccess\x18\x01 \x01(\bR\asuccess\x12\x1a\n" +
"\berrorMsg\x18\x02 \x01(\tR\berrorMsg\"<\n" +
"\x18GetPeerSSHHostKeyRequest\x12 \n" +
"\vpeerAddress\x18\x01 \x01(\tR\vpeerAddress\"\x85\x01\n" +
"\x19GetPeerSSHHostKeyResponse\x12\x1e\n" +
@@ -6419,7 +6518,7 @@ const file_daemon_proto_rawDesc = "" +
"\x16InstallerResultRequest\"O\n" +
"\x17InstallerResultResponse\x12\x18\n" +
"\asuccess\x18\x01 \x01(\bR\asuccess\x12\x1a\n" +
"\berrorMsg\x18\x02 \x01(\tR\berrorMsg\"\xe6\x01\n" +
"\berrorMsg\x18\x02 \x01(\tR\berrorMsg\"\x87\x02\n" +
"\x14ExposeServiceRequest\x12\x12\n" +
"\x04port\x18\x01 \x01(\rR\x04port\x122\n" +
"\bprotocol\x18\x02 \x01(\x0e2\x16.daemon.ExposeProtocolR\bprotocol\x12\x10\n" +
@@ -6429,15 +6528,18 @@ const file_daemon_proto_rawDesc = "" +
"userGroups\x12\x16\n" +
"\x06domain\x18\x06 \x01(\tR\x06domain\x12\x1f\n" +
"\vname_prefix\x18\a \x01(\tR\n" +
"namePrefix\"Q\n" +
"namePrefix\x12\x1f\n" +
"\vlisten_port\x18\b \x01(\rR\n" +
"listenPort\"Q\n" +
"\x12ExposeServiceEvent\x122\n" +
"\x05ready\x18\x01 \x01(\v2\x1a.daemon.ExposeServiceReadyH\x00R\x05readyB\a\n" +
"\x05event\"p\n" +
"\x05event\"\x9e\x01\n" +
"\x12ExposeServiceReady\x12!\n" +
"\fservice_name\x18\x01 \x01(\tR\vserviceName\x12\x1f\n" +
"\vservice_url\x18\x02 \x01(\tR\n" +
"serviceUrl\x12\x16\n" +
"\x06domain\x18\x03 \x01(\tR\x06domain*b\n" +
"\x06domain\x18\x03 \x01(\tR\x06domain\x12,\n" +
"\x12port_auto_assigned\x18\x04 \x01(\bR\x10portAutoAssigned*b\n" +
"\bLogLevel\x12\v\n" +
"\aUNKNOWN\x10\x00\x12\t\n" +
"\x05PANIC\x10\x01\x12\t\n" +
@@ -6446,14 +6548,16 @@ const file_daemon_proto_rawDesc = "" +
"\x04WARN\x10\x04\x12\b\n" +
"\x04INFO\x10\x05\x12\t\n" +
"\x05DEBUG\x10\x06\x12\t\n" +
"\x05TRACE\x10\a*S\n" +
"\x05TRACE\x10\a*c\n" +
"\x0eExposeProtocol\x12\x0f\n" +
"\vEXPOSE_HTTP\x10\x00\x12\x10\n" +
"\fEXPOSE_HTTPS\x10\x01\x12\x0e\n" +
"\n" +
"EXPOSE_TCP\x10\x02\x12\x0e\n" +
"\n" +
"EXPOSE_UDP\x10\x032\xac\x15\n" +
"EXPOSE_UDP\x10\x03\x12\x0e\n" +
"\n" +
"EXPOSE_TLS\x10\x042\xfc\x15\n" +
"\rDaemonService\x126\n" +
"\x05Login\x12\x14.daemon.LoginRequest\x1a\x15.daemon.LoginResponse\"\x00\x12K\n" +
"\fWaitSSOLogin\x12\x1b.daemon.WaitSSOLoginRequest\x1a\x1c.daemon.WaitSSOLoginResponse\"\x00\x12-\n" +
@@ -6485,7 +6589,8 @@ const file_daemon_proto_rawDesc = "" +
"\fListProfiles\x12\x1b.daemon.ListProfilesRequest\x1a\x1c.daemon.ListProfilesResponse\"\x00\x12W\n" +
"\x10GetActiveProfile\x12\x1f.daemon.GetActiveProfileRequest\x1a .daemon.GetActiveProfileResponse\"\x00\x129\n" +
"\x06Logout\x12\x15.daemon.LogoutRequest\x1a\x16.daemon.LogoutResponse\"\x00\x12H\n" +
"\vGetFeatures\x12\x1a.daemon.GetFeaturesRequest\x1a\x1b.daemon.GetFeaturesResponse\"\x00\x12Z\n" +
"\vGetFeatures\x12\x1a.daemon.GetFeaturesRequest\x1a\x1b.daemon.GetFeaturesResponse\"\x00\x12N\n" +
"\rTriggerUpdate\x12\x1c.daemon.TriggerUpdateRequest\x1a\x1d.daemon.TriggerUpdateResponse\"\x00\x12Z\n" +
"\x11GetPeerSSHHostKey\x12 .daemon.GetPeerSSHHostKeyRequest\x1a!.daemon.GetPeerSSHHostKeyResponse\"\x00\x12Q\n" +
"\x0eRequestJWTAuth\x12\x1d.daemon.RequestJWTAuthRequest\x1a\x1e.daemon.RequestJWTAuthResponse\"\x00\x12K\n" +
"\fWaitJWTToken\x12\x1b.daemon.WaitJWTTokenRequest\x1a\x1c.daemon.WaitJWTTokenResponse\"\x00\x12T\n" +
@@ -6508,7 +6613,7 @@ func file_daemon_proto_rawDescGZIP() []byte {
}
var file_daemon_proto_enumTypes = make([]protoimpl.EnumInfo, 5)
var file_daemon_proto_msgTypes = make([]protoimpl.MessageInfo, 91)
var file_daemon_proto_msgTypes = make([]protoimpl.MessageInfo, 93)
var file_daemon_proto_goTypes = []any{
(LogLevel)(0), // 0: daemon.LogLevel
(ExposeProtocol)(0), // 1: daemon.ExposeProtocol
@@ -6588,34 +6693,36 @@ var file_daemon_proto_goTypes = []any{
(*LogoutResponse)(nil), // 75: daemon.LogoutResponse
(*GetFeaturesRequest)(nil), // 76: daemon.GetFeaturesRequest
(*GetFeaturesResponse)(nil), // 77: daemon.GetFeaturesResponse
(*GetPeerSSHHostKeyRequest)(nil), // 78: daemon.GetPeerSSHHostKeyRequest
(*GetPeerSSHHostKeyResponse)(nil), // 79: daemon.GetPeerSSHHostKeyResponse
(*RequestJWTAuthRequest)(nil), // 80: daemon.RequestJWTAuthRequest
(*RequestJWTAuthResponse)(nil), // 81: daemon.RequestJWTAuthResponse
(*WaitJWTTokenRequest)(nil), // 82: daemon.WaitJWTTokenRequest
(*WaitJWTTokenResponse)(nil), // 83: daemon.WaitJWTTokenResponse
(*StartCPUProfileRequest)(nil), // 84: daemon.StartCPUProfileRequest
(*StartCPUProfileResponse)(nil), // 85: daemon.StartCPUProfileResponse
(*StopCPUProfileRequest)(nil), // 86: daemon.StopCPUProfileRequest
(*StopCPUProfileResponse)(nil), // 87: daemon.StopCPUProfileResponse
(*InstallerResultRequest)(nil), // 88: daemon.InstallerResultRequest
(*InstallerResultResponse)(nil), // 89: daemon.InstallerResultResponse
(*ExposeServiceRequest)(nil), // 90: daemon.ExposeServiceRequest
(*ExposeServiceEvent)(nil), // 91: daemon.ExposeServiceEvent
(*ExposeServiceReady)(nil), // 92: daemon.ExposeServiceReady
nil, // 93: daemon.Network.ResolvedIPsEntry
(*PortInfo_Range)(nil), // 94: daemon.PortInfo.Range
nil, // 95: daemon.SystemEvent.MetadataEntry
(*durationpb.Duration)(nil), // 96: google.protobuf.Duration
(*timestamppb.Timestamp)(nil), // 97: google.protobuf.Timestamp
(*TriggerUpdateRequest)(nil), // 78: daemon.TriggerUpdateRequest
(*TriggerUpdateResponse)(nil), // 79: daemon.TriggerUpdateResponse
(*GetPeerSSHHostKeyRequest)(nil), // 80: daemon.GetPeerSSHHostKeyRequest
(*GetPeerSSHHostKeyResponse)(nil), // 81: daemon.GetPeerSSHHostKeyResponse
(*RequestJWTAuthRequest)(nil), // 82: daemon.RequestJWTAuthRequest
(*RequestJWTAuthResponse)(nil), // 83: daemon.RequestJWTAuthResponse
(*WaitJWTTokenRequest)(nil), // 84: daemon.WaitJWTTokenRequest
(*WaitJWTTokenResponse)(nil), // 85: daemon.WaitJWTTokenResponse
(*StartCPUProfileRequest)(nil), // 86: daemon.StartCPUProfileRequest
(*StartCPUProfileResponse)(nil), // 87: daemon.StartCPUProfileResponse
(*StopCPUProfileRequest)(nil), // 88: daemon.StopCPUProfileRequest
(*StopCPUProfileResponse)(nil), // 89: daemon.StopCPUProfileResponse
(*InstallerResultRequest)(nil), // 90: daemon.InstallerResultRequest
(*InstallerResultResponse)(nil), // 91: daemon.InstallerResultResponse
(*ExposeServiceRequest)(nil), // 92: daemon.ExposeServiceRequest
(*ExposeServiceEvent)(nil), // 93: daemon.ExposeServiceEvent
(*ExposeServiceReady)(nil), // 94: daemon.ExposeServiceReady
nil, // 95: daemon.Network.ResolvedIPsEntry
(*PortInfo_Range)(nil), // 96: daemon.PortInfo.Range
nil, // 97: daemon.SystemEvent.MetadataEntry
(*durationpb.Duration)(nil), // 98: google.protobuf.Duration
(*timestamppb.Timestamp)(nil), // 99: google.protobuf.Timestamp
}
var file_daemon_proto_depIdxs = []int32{
2, // 0: daemon.OSLifecycleRequest.type:type_name -> daemon.OSLifecycleRequest.CycleType
96, // 1: daemon.LoginRequest.dnsRouteInterval:type_name -> google.protobuf.Duration
98, // 1: daemon.LoginRequest.dnsRouteInterval:type_name -> google.protobuf.Duration
28, // 2: daemon.StatusResponse.fullStatus:type_name -> daemon.FullStatus
97, // 3: daemon.PeerState.connStatusUpdate:type_name -> google.protobuf.Timestamp
97, // 4: daemon.PeerState.lastWireguardHandshake:type_name -> google.protobuf.Timestamp
96, // 5: daemon.PeerState.latency:type_name -> google.protobuf.Duration
99, // 3: daemon.PeerState.connStatusUpdate:type_name -> google.protobuf.Timestamp
99, // 4: daemon.PeerState.lastWireguardHandshake:type_name -> google.protobuf.Timestamp
98, // 5: daemon.PeerState.latency:type_name -> google.protobuf.Duration
26, // 6: daemon.SSHServerState.sessions:type_name -> daemon.SSHSessionInfo
23, // 7: daemon.FullStatus.managementState:type_name -> daemon.ManagementState
22, // 8: daemon.FullStatus.signalState:type_name -> daemon.SignalState
@@ -6626,8 +6733,8 @@ var file_daemon_proto_depIdxs = []int32{
58, // 13: daemon.FullStatus.events:type_name -> daemon.SystemEvent
27, // 14: daemon.FullStatus.sshServerState:type_name -> daemon.SSHServerState
34, // 15: daemon.ListNetworksResponse.routes:type_name -> daemon.Network
93, // 16: daemon.Network.resolvedIPs:type_name -> daemon.Network.ResolvedIPsEntry
94, // 17: daemon.PortInfo.range:type_name -> daemon.PortInfo.Range
95, // 16: daemon.Network.resolvedIPs:type_name -> daemon.Network.ResolvedIPsEntry
96, // 17: daemon.PortInfo.range:type_name -> daemon.PortInfo.Range
35, // 18: daemon.ForwardingRule.destinationPort:type_name -> daemon.PortInfo
35, // 19: daemon.ForwardingRule.translatedPort:type_name -> daemon.PortInfo
36, // 20: daemon.ForwardingRulesResponse.rules:type_name -> daemon.ForwardingRule
@@ -6638,13 +6745,13 @@ var file_daemon_proto_depIdxs = []int32{
55, // 25: daemon.TracePacketResponse.stages:type_name -> daemon.TraceStage
3, // 26: daemon.SystemEvent.severity:type_name -> daemon.SystemEvent.Severity
4, // 27: daemon.SystemEvent.category:type_name -> daemon.SystemEvent.Category
97, // 28: daemon.SystemEvent.timestamp:type_name -> google.protobuf.Timestamp
95, // 29: daemon.SystemEvent.metadata:type_name -> daemon.SystemEvent.MetadataEntry
99, // 28: daemon.SystemEvent.timestamp:type_name -> google.protobuf.Timestamp
97, // 29: daemon.SystemEvent.metadata:type_name -> daemon.SystemEvent.MetadataEntry
58, // 30: daemon.GetEventsResponse.events:type_name -> daemon.SystemEvent
96, // 31: daemon.SetConfigRequest.dnsRouteInterval:type_name -> google.protobuf.Duration
98, // 31: daemon.SetConfigRequest.dnsRouteInterval:type_name -> google.protobuf.Duration
71, // 32: daemon.ListProfilesResponse.profiles:type_name -> daemon.Profile
1, // 33: daemon.ExposeServiceRequest.protocol:type_name -> daemon.ExposeProtocol
92, // 34: daemon.ExposeServiceEvent.ready:type_name -> daemon.ExposeServiceReady
94, // 34: daemon.ExposeServiceEvent.ready:type_name -> daemon.ExposeServiceReady
33, // 35: daemon.Network.ResolvedIPsEntry.value:type_name -> daemon.IPList
8, // 36: daemon.DaemonService.Login:input_type -> daemon.LoginRequest
10, // 37: daemon.DaemonService.WaitSSOLogin:input_type -> daemon.WaitSSOLoginRequest
@@ -6674,52 +6781,54 @@ var file_daemon_proto_depIdxs = []int32{
72, // 61: daemon.DaemonService.GetActiveProfile:input_type -> daemon.GetActiveProfileRequest
74, // 62: daemon.DaemonService.Logout:input_type -> daemon.LogoutRequest
76, // 63: daemon.DaemonService.GetFeatures:input_type -> daemon.GetFeaturesRequest
78, // 64: daemon.DaemonService.GetPeerSSHHostKey:input_type -> daemon.GetPeerSSHHostKeyRequest
80, // 65: daemon.DaemonService.RequestJWTAuth:input_type -> daemon.RequestJWTAuthRequest
82, // 66: daemon.DaemonService.WaitJWTToken:input_type -> daemon.WaitJWTTokenRequest
84, // 67: daemon.DaemonService.StartCPUProfile:input_type -> daemon.StartCPUProfileRequest
86, // 68: daemon.DaemonService.StopCPUProfile:input_type -> daemon.StopCPUProfileRequest
6, // 69: daemon.DaemonService.NotifyOSLifecycle:input_type -> daemon.OSLifecycleRequest
88, // 70: daemon.DaemonService.GetInstallerResult:input_type -> daemon.InstallerResultRequest
90, // 71: daemon.DaemonService.ExposeService:input_type -> daemon.ExposeServiceRequest
9, // 72: daemon.DaemonService.Login:output_type -> daemon.LoginResponse
11, // 73: daemon.DaemonService.WaitSSOLogin:output_type -> daemon.WaitSSOLoginResponse
13, // 74: daemon.DaemonService.Up:output_type -> daemon.UpResponse
15, // 75: daemon.DaemonService.Status:output_type -> daemon.StatusResponse
17, // 76: daemon.DaemonService.Down:output_type -> daemon.DownResponse
19, // 77: daemon.DaemonService.GetConfig:output_type -> daemon.GetConfigResponse
30, // 78: daemon.DaemonService.ListNetworks:output_type -> daemon.ListNetworksResponse
32, // 79: daemon.DaemonService.SelectNetworks:output_type -> daemon.SelectNetworksResponse
32, // 80: daemon.DaemonService.DeselectNetworks:output_type -> daemon.SelectNetworksResponse
37, // 81: daemon.DaemonService.ForwardingRules:output_type -> daemon.ForwardingRulesResponse
39, // 82: daemon.DaemonService.DebugBundle:output_type -> daemon.DebugBundleResponse
41, // 83: daemon.DaemonService.GetLogLevel:output_type -> daemon.GetLogLevelResponse
43, // 84: daemon.DaemonService.SetLogLevel:output_type -> daemon.SetLogLevelResponse
46, // 85: daemon.DaemonService.ListStates:output_type -> daemon.ListStatesResponse
48, // 86: daemon.DaemonService.CleanState:output_type -> daemon.CleanStateResponse
50, // 87: daemon.DaemonService.DeleteState:output_type -> daemon.DeleteStateResponse
52, // 88: daemon.DaemonService.SetSyncResponsePersistence:output_type -> daemon.SetSyncResponsePersistenceResponse
56, // 89: daemon.DaemonService.TracePacket:output_type -> daemon.TracePacketResponse
58, // 90: daemon.DaemonService.SubscribeEvents:output_type -> daemon.SystemEvent
60, // 91: daemon.DaemonService.GetEvents:output_type -> daemon.GetEventsResponse
62, // 92: daemon.DaemonService.SwitchProfile:output_type -> daemon.SwitchProfileResponse
64, // 93: daemon.DaemonService.SetConfig:output_type -> daemon.SetConfigResponse
66, // 94: daemon.DaemonService.AddProfile:output_type -> daemon.AddProfileResponse
68, // 95: daemon.DaemonService.RemoveProfile:output_type -> daemon.RemoveProfileResponse
70, // 96: daemon.DaemonService.ListProfiles:output_type -> daemon.ListProfilesResponse
73, // 97: daemon.DaemonService.GetActiveProfile:output_type -> daemon.GetActiveProfileResponse
75, // 98: daemon.DaemonService.Logout:output_type -> daemon.LogoutResponse
77, // 99: daemon.DaemonService.GetFeatures:output_type -> daemon.GetFeaturesResponse
79, // 100: daemon.DaemonService.GetPeerSSHHostKey:output_type -> daemon.GetPeerSSHHostKeyResponse
81, // 101: daemon.DaemonService.RequestJWTAuth:output_type -> daemon.RequestJWTAuthResponse
83, // 102: daemon.DaemonService.WaitJWTToken:output_type -> daemon.WaitJWTTokenResponse
85, // 103: daemon.DaemonService.StartCPUProfile:output_type -> daemon.StartCPUProfileResponse
87, // 104: daemon.DaemonService.StopCPUProfile:output_type -> daemon.StopCPUProfileResponse
7, // 105: daemon.DaemonService.NotifyOSLifecycle:output_type -> daemon.OSLifecycleResponse
89, // 106: daemon.DaemonService.GetInstallerResult:output_type -> daemon.InstallerResultResponse
91, // 107: daemon.DaemonService.ExposeService:output_type -> daemon.ExposeServiceEvent
72, // [72:108] is the sub-list for method output_type
36, // [36:72] is the sub-list for method input_type
78, // 64: daemon.DaemonService.TriggerUpdate:input_type -> daemon.TriggerUpdateRequest
80, // 65: daemon.DaemonService.GetPeerSSHHostKey:input_type -> daemon.GetPeerSSHHostKeyRequest
82, // 66: daemon.DaemonService.RequestJWTAuth:input_type -> daemon.RequestJWTAuthRequest
84, // 67: daemon.DaemonService.WaitJWTToken:input_type -> daemon.WaitJWTTokenRequest
86, // 68: daemon.DaemonService.StartCPUProfile:input_type -> daemon.StartCPUProfileRequest
88, // 69: daemon.DaemonService.StopCPUProfile:input_type -> daemon.StopCPUProfileRequest
6, // 70: daemon.DaemonService.NotifyOSLifecycle:input_type -> daemon.OSLifecycleRequest
90, // 71: daemon.DaemonService.GetInstallerResult:input_type -> daemon.InstallerResultRequest
92, // 72: daemon.DaemonService.ExposeService:input_type -> daemon.ExposeServiceRequest
9, // 73: daemon.DaemonService.Login:output_type -> daemon.LoginResponse
11, // 74: daemon.DaemonService.WaitSSOLogin:output_type -> daemon.WaitSSOLoginResponse
13, // 75: daemon.DaemonService.Up:output_type -> daemon.UpResponse
15, // 76: daemon.DaemonService.Status:output_type -> daemon.StatusResponse
17, // 77: daemon.DaemonService.Down:output_type -> daemon.DownResponse
19, // 78: daemon.DaemonService.GetConfig:output_type -> daemon.GetConfigResponse
30, // 79: daemon.DaemonService.ListNetworks:output_type -> daemon.ListNetworksResponse
32, // 80: daemon.DaemonService.SelectNetworks:output_type -> daemon.SelectNetworksResponse
32, // 81: daemon.DaemonService.DeselectNetworks:output_type -> daemon.SelectNetworksResponse
37, // 82: daemon.DaemonService.ForwardingRules:output_type -> daemon.ForwardingRulesResponse
39, // 83: daemon.DaemonService.DebugBundle:output_type -> daemon.DebugBundleResponse
41, // 84: daemon.DaemonService.GetLogLevel:output_type -> daemon.GetLogLevelResponse
43, // 85: daemon.DaemonService.SetLogLevel:output_type -> daemon.SetLogLevelResponse
46, // 86: daemon.DaemonService.ListStates:output_type -> daemon.ListStatesResponse
48, // 87: daemon.DaemonService.CleanState:output_type -> daemon.CleanStateResponse
50, // 88: daemon.DaemonService.DeleteState:output_type -> daemon.DeleteStateResponse
52, // 89: daemon.DaemonService.SetSyncResponsePersistence:output_type -> daemon.SetSyncResponsePersistenceResponse
56, // 90: daemon.DaemonService.TracePacket:output_type -> daemon.TracePacketResponse
58, // 91: daemon.DaemonService.SubscribeEvents:output_type -> daemon.SystemEvent
60, // 92: daemon.DaemonService.GetEvents:output_type -> daemon.GetEventsResponse
62, // 93: daemon.DaemonService.SwitchProfile:output_type -> daemon.SwitchProfileResponse
64, // 94: daemon.DaemonService.SetConfig:output_type -> daemon.SetConfigResponse
66, // 95: daemon.DaemonService.AddProfile:output_type -> daemon.AddProfileResponse
68, // 96: daemon.DaemonService.RemoveProfile:output_type -> daemon.RemoveProfileResponse
70, // 97: daemon.DaemonService.ListProfiles:output_type -> daemon.ListProfilesResponse
73, // 98: daemon.DaemonService.GetActiveProfile:output_type -> daemon.GetActiveProfileResponse
75, // 99: daemon.DaemonService.Logout:output_type -> daemon.LogoutResponse
77, // 100: daemon.DaemonService.GetFeatures:output_type -> daemon.GetFeaturesResponse
79, // 101: daemon.DaemonService.TriggerUpdate:output_type -> daemon.TriggerUpdateResponse
81, // 102: daemon.DaemonService.GetPeerSSHHostKey:output_type -> daemon.GetPeerSSHHostKeyResponse
83, // 103: daemon.DaemonService.RequestJWTAuth:output_type -> daemon.RequestJWTAuthResponse
85, // 104: daemon.DaemonService.WaitJWTToken:output_type -> daemon.WaitJWTTokenResponse
87, // 105: daemon.DaemonService.StartCPUProfile:output_type -> daemon.StartCPUProfileResponse
89, // 106: daemon.DaemonService.StopCPUProfile:output_type -> daemon.StopCPUProfileResponse
7, // 107: daemon.DaemonService.NotifyOSLifecycle:output_type -> daemon.OSLifecycleResponse
91, // 108: daemon.DaemonService.GetInstallerResult:output_type -> daemon.InstallerResultResponse
93, // 109: daemon.DaemonService.ExposeService:output_type -> daemon.ExposeServiceEvent
73, // [73:110] is the sub-list for method output_type
36, // [36:73] is the sub-list for method input_type
36, // [36:36] is the sub-list for extension type_name
36, // [36:36] is the sub-list for extension extendee
0, // [0:36] is the sub-list for field type_name
@@ -6742,8 +6851,8 @@ func file_daemon_proto_init() {
file_daemon_proto_msgTypes[56].OneofWrappers = []any{}
file_daemon_proto_msgTypes[58].OneofWrappers = []any{}
file_daemon_proto_msgTypes[69].OneofWrappers = []any{}
file_daemon_proto_msgTypes[75].OneofWrappers = []any{}
file_daemon_proto_msgTypes[86].OneofWrappers = []any{
file_daemon_proto_msgTypes[77].OneofWrappers = []any{}
file_daemon_proto_msgTypes[88].OneofWrappers = []any{
(*ExposeServiceEvent_Ready)(nil),
}
type x struct{}
@@ -6752,7 +6861,7 @@ func file_daemon_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_daemon_proto_rawDesc), len(file_daemon_proto_rawDesc)),
NumEnums: 5,
NumMessages: 91,
NumMessages: 93,
NumExtensions: 0,
NumServices: 1,
},

View File

@@ -85,6 +85,10 @@ service DaemonService {
rpc GetFeatures(GetFeaturesRequest) returns (GetFeaturesResponse) {}
// TriggerUpdate initiates installation of the pending enforced version.
// Called when the user clicks the install button in the UI (Mode 2 / enforced update).
rpc TriggerUpdate(TriggerUpdateRequest) returns (TriggerUpdateResponse) {}
// GetPeerSSHHostKey retrieves SSH host key for a specific peer
rpc GetPeerSSHHostKey(GetPeerSSHHostKeyRequest) returns (GetPeerSSHHostKeyResponse) {}
@@ -226,7 +230,7 @@ message WaitSSOLoginResponse {
message UpRequest {
optional string profileName = 1;
optional string username = 2;
optional bool autoUpdate = 3;
reserved 3;
}
message UpResponse {}
@@ -725,6 +729,13 @@ message GetFeaturesResponse{
bool disable_update_settings = 2;
}
message TriggerUpdateRequest {}
message TriggerUpdateResponse {
bool success = 1;
string errorMsg = 2;
}
// GetPeerSSHHostKeyRequest for retrieving SSH host key for a specific peer
message GetPeerSSHHostKeyRequest {
// peer IP address or FQDN to get SSH host key for
@@ -810,6 +821,7 @@ enum ExposeProtocol {
EXPOSE_HTTPS = 1;
EXPOSE_TCP = 2;
EXPOSE_UDP = 3;
EXPOSE_TLS = 4;
}
message ExposeServiceRequest {
@@ -820,6 +832,7 @@ message ExposeServiceRequest {
repeated string user_groups = 5;
string domain = 6;
string name_prefix = 7;
uint32 listen_port = 8;
}
message ExposeServiceEvent {
@@ -832,4 +845,5 @@ message ExposeServiceReady {
string service_name = 1;
string service_url = 2;
string domain = 3;
bool port_auto_assigned = 4;
}

View File

@@ -64,6 +64,9 @@ type DaemonServiceClient interface {
// Logout disconnects from the network and deletes the peer from the management server
Logout(ctx context.Context, in *LogoutRequest, opts ...grpc.CallOption) (*LogoutResponse, error)
GetFeatures(ctx context.Context, in *GetFeaturesRequest, opts ...grpc.CallOption) (*GetFeaturesResponse, error)
// TriggerUpdate initiates installation of the pending enforced version.
// Called when the user clicks the install button in the UI (Mode 2 / enforced update).
TriggerUpdate(ctx context.Context, in *TriggerUpdateRequest, opts ...grpc.CallOption) (*TriggerUpdateResponse, error)
// GetPeerSSHHostKey retrieves SSH host key for a specific peer
GetPeerSSHHostKey(ctx context.Context, in *GetPeerSSHHostKeyRequest, opts ...grpc.CallOption) (*GetPeerSSHHostKeyResponse, error)
// RequestJWTAuth initiates JWT authentication flow for SSH
@@ -363,6 +366,15 @@ func (c *daemonServiceClient) GetFeatures(ctx context.Context, in *GetFeaturesRe
return out, nil
}
func (c *daemonServiceClient) TriggerUpdate(ctx context.Context, in *TriggerUpdateRequest, opts ...grpc.CallOption) (*TriggerUpdateResponse, error) {
out := new(TriggerUpdateResponse)
err := c.cc.Invoke(ctx, "/daemon.DaemonService/TriggerUpdate", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *daemonServiceClient) GetPeerSSHHostKey(ctx context.Context, in *GetPeerSSHHostKeyRequest, opts ...grpc.CallOption) (*GetPeerSSHHostKeyResponse, error) {
out := new(GetPeerSSHHostKeyResponse)
err := c.cc.Invoke(ctx, "/daemon.DaemonService/GetPeerSSHHostKey", in, out, opts...)
@@ -508,6 +520,9 @@ type DaemonServiceServer interface {
// Logout disconnects from the network and deletes the peer from the management server
Logout(context.Context, *LogoutRequest) (*LogoutResponse, error)
GetFeatures(context.Context, *GetFeaturesRequest) (*GetFeaturesResponse, error)
// TriggerUpdate initiates installation of the pending enforced version.
// Called when the user clicks the install button in the UI (Mode 2 / enforced update).
TriggerUpdate(context.Context, *TriggerUpdateRequest) (*TriggerUpdateResponse, error)
// GetPeerSSHHostKey retrieves SSH host key for a specific peer
GetPeerSSHHostKey(context.Context, *GetPeerSSHHostKeyRequest) (*GetPeerSSHHostKeyResponse, error)
// RequestJWTAuth initiates JWT authentication flow for SSH
@@ -613,6 +628,9 @@ func (UnimplementedDaemonServiceServer) Logout(context.Context, *LogoutRequest)
func (UnimplementedDaemonServiceServer) GetFeatures(context.Context, *GetFeaturesRequest) (*GetFeaturesResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetFeatures not implemented")
}
func (UnimplementedDaemonServiceServer) TriggerUpdate(context.Context, *TriggerUpdateRequest) (*TriggerUpdateResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method TriggerUpdate not implemented")
}
func (UnimplementedDaemonServiceServer) GetPeerSSHHostKey(context.Context, *GetPeerSSHHostKeyRequest) (*GetPeerSSHHostKeyResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetPeerSSHHostKey not implemented")
}
@@ -1157,6 +1175,24 @@ func _DaemonService_GetFeatures_Handler(srv interface{}, ctx context.Context, de
return interceptor(ctx, in, info, handler)
}
func _DaemonService_TriggerUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(TriggerUpdateRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(DaemonServiceServer).TriggerUpdate(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/daemon.DaemonService/TriggerUpdate",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(DaemonServiceServer).TriggerUpdate(ctx, req.(*TriggerUpdateRequest))
}
return interceptor(ctx, in, info, handler)
}
func _DaemonService_GetPeerSSHHostKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetPeerSSHHostKeyRequest)
if err := dec(in); err != nil {
@@ -1419,6 +1455,10 @@ var DaemonService_ServiceDesc = grpc.ServiceDesc{
MethodName: "GetFeatures",
Handler: _DaemonService_GetFeatures_Handler,
},
{
MethodName: "TriggerUpdate",
Handler: _DaemonService_TriggerUpdate_Handler,
},
{
MethodName: "GetPeerSSHHostKey",
Handler: _DaemonService_GetPeerSSHHostKey_Handler,

View File

@@ -14,6 +14,7 @@ func (s *Server) SubscribeEvents(req *proto.SubscribeRequest, stream proto.Daemo
}()
log.Debug("client subscribed to events")
s.startUpdateManagerForGUI()
for {
select {

View File

@@ -30,6 +30,8 @@ import (
"github.com/netbirdio/netbird/client/internal"
"github.com/netbirdio/netbird/client/internal/peer"
"github.com/netbirdio/netbird/client/internal/statemanager"
"github.com/netbirdio/netbird/client/internal/updater"
"github.com/netbirdio/netbird/client/proto"
"github.com/netbirdio/netbird/version"
)
@@ -89,6 +91,8 @@ type Server struct {
sleepHandler *sleephandler.SleepHandler
updateManager *updater.Manager
jwtCache *jwtCache
}
@@ -135,6 +139,12 @@ func (s *Server) Start() error {
log.Warnf(errRestoreResidualState, err)
}
if s.updateManager == nil {
stateMgr := statemanager.New(s.profileManager.GetStatePath())
s.updateManager = updater.NewManager(s.statusRecorder, stateMgr)
s.updateManager.CheckUpdateSuccess(s.rootCtx)
}
// if current state contains any error, return it
// in all other cases we can continue execution only if status is idle and up command was
// not in the progress or already successfully established connection.
@@ -192,14 +202,14 @@ func (s *Server) Start() error {
s.clientRunning = true
s.clientRunningChan = make(chan struct{})
s.clientGiveUpChan = make(chan struct{})
go s.connectWithRetryRuns(ctx, config, s.statusRecorder, false, s.clientRunningChan, s.clientGiveUpChan)
go s.connectWithRetryRuns(ctx, config, s.statusRecorder, s.clientRunningChan, s.clientGiveUpChan)
return nil
}
// connectWithRetryRuns runs the client connection with a backoff strategy where we retry the operation as additional
// mechanism to keep the client connected even when the connection is lost.
// we cancel retry if the client receive a stop or down command, or if disable auto connect is configured.
func (s *Server) connectWithRetryRuns(ctx context.Context, profileConfig *profilemanager.Config, statusRecorder *peer.Status, doInitialAutoUpdate bool, runningChan chan struct{}, giveUpChan chan struct{}) {
func (s *Server) connectWithRetryRuns(ctx context.Context, profileConfig *profilemanager.Config, statusRecorder *peer.Status, runningChan chan struct{}, giveUpChan chan struct{}) {
defer func() {
s.mutex.Lock()
s.clientRunning = false
@@ -207,7 +217,7 @@ func (s *Server) connectWithRetryRuns(ctx context.Context, profileConfig *profil
}()
if s.config.DisableAutoConnect {
if err := s.connect(ctx, s.config, s.statusRecorder, doInitialAutoUpdate, runningChan); err != nil {
if err := s.connect(ctx, s.config, s.statusRecorder, runningChan); err != nil {
log.Debugf("run client connection exited with error: %v", err)
}
log.Tracef("client connection exited")
@@ -236,8 +246,7 @@ func (s *Server) connectWithRetryRuns(ctx context.Context, profileConfig *profil
}()
runOperation := func() error {
err := s.connect(ctx, profileConfig, statusRecorder, doInitialAutoUpdate, runningChan)
doInitialAutoUpdate = false
err := s.connect(ctx, profileConfig, statusRecorder, runningChan)
if err != nil {
log.Debugf("run client connection exited with error: %v. Will retry in the background", err)
return err
@@ -717,11 +726,7 @@ func (s *Server) Up(callerCtx context.Context, msg *proto.UpRequest) (*proto.UpR
s.clientRunningChan = make(chan struct{})
s.clientGiveUpChan = make(chan struct{})
var doAutoUpdate bool
if msg != nil && msg.AutoUpdate != nil && *msg.AutoUpdate {
doAutoUpdate = true
}
go s.connectWithRetryRuns(ctx, s.config, s.statusRecorder, doAutoUpdate, s.clientRunningChan, s.clientGiveUpChan)
go s.connectWithRetryRuns(ctx, s.config, s.statusRecorder, s.clientRunningChan, s.clientGiveUpChan)
s.mutex.Unlock()
return s.waitForUp(callerCtx)
@@ -849,14 +854,26 @@ func (s *Server) cleanupConnection() error {
if s.actCancel == nil {
return ErrServiceNotUp
}
// Capture the engine reference before cancelling the context.
// After actCancel(), the connectWithRetryRuns goroutine wakes up
// and sets connectClient.engine = nil, causing connectClient.Stop()
// to skip the engine shutdown entirely.
var engine *internal.Engine
if s.connectClient != nil {
engine = s.connectClient.Engine()
}
s.actCancel()
if s.connectClient == nil {
return nil
}
if err := s.connectClient.Stop(); err != nil {
return err
if engine != nil {
if err := engine.Stop(); err != nil {
return err
}
}
s.connectClient = nil
@@ -1361,9 +1378,10 @@ func (s *Server) ExposeService(req *proto.ExposeServiceRequest, srv proto.Daemon
if err := srv.Send(&proto.ExposeServiceEvent{
Event: &proto.ExposeServiceEvent_Ready{
Ready: &proto.ExposeServiceReady{
ServiceName: result.ServiceName,
ServiceUrl: result.ServiceURL,
Domain: result.Domain,
ServiceName: result.ServiceName,
ServiceUrl: result.ServiceURL,
Domain: result.Domain,
PortAutoAssigned: result.PortAutoAssigned,
},
},
}); err != nil {
@@ -1611,11 +1629,17 @@ func (s *Server) GetFeatures(ctx context.Context, msg *proto.GetFeaturesRequest)
return features, nil
}
func (s *Server) connect(ctx context.Context, config *profilemanager.Config, statusRecorder *peer.Status, doInitialAutoUpdate bool, runningChan chan struct{}) error {
func (s *Server) connect(ctx context.Context, config *profilemanager.Config, statusRecorder *peer.Status, runningChan chan struct{}) error {
log.Tracef("running client connection")
s.connectClient = internal.NewConnectClient(ctx, config, statusRecorder, doInitialAutoUpdate)
s.connectClient.SetSyncResponsePersistence(s.persistSyncResponse)
if err := s.connectClient.Run(runningChan, s.logFile); err != nil {
client := internal.NewConnectClient(ctx, config, statusRecorder)
client.SetUpdateManager(s.updateManager)
client.SetSyncResponsePersistence(s.persistSyncResponse)
s.mutex.Lock()
s.connectClient = client
s.mutex.Unlock()
if err := client.Run(runningChan, s.logFile); err != nil {
return err
}
return nil
@@ -1639,6 +1663,14 @@ func (s *Server) checkUpdateSettingsDisabled() bool {
return false
}
func (s *Server) startUpdateManagerForGUI() {
if s.updateManager == nil {
return
}
s.updateManager.Start(s.rootCtx)
s.updateManager.NotifyUI()
}
func (s *Server) onSessionExpire() {
if runtime.GOOS != "windows" {
isUIActive := internal.CheckUIApp()

View File

@@ -0,0 +1,187 @@
package server
import (
"context"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/netbirdio/netbird/client/internal"
"github.com/netbirdio/netbird/client/internal/peer"
"github.com/netbirdio/netbird/client/proto"
)
func newTestServer() *Server {
return &Server{
rootCtx: context.Background(),
statusRecorder: peer.NewRecorder(""),
}
}
func newDummyConnectClient(ctx context.Context) *internal.ConnectClient {
return internal.NewConnectClient(ctx, nil, nil)
}
// TestConnectSetsClientWithMutex validates that connect() sets s.connectClient
// under mutex protection so concurrent readers see a consistent value.
func TestConnectSetsClientWithMutex(t *testing.T) {
s := newTestServer()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Manually simulate what connect() does (without calling Run which panics without full setup)
client := newDummyConnectClient(ctx)
s.mutex.Lock()
s.connectClient = client
s.mutex.Unlock()
// Verify the assignment is visible under mutex
s.mutex.Lock()
assert.Equal(t, client, s.connectClient, "connectClient should be set")
s.mutex.Unlock()
}
// TestConcurrentConnectClientAccess validates that concurrent reads of
// s.connectClient under mutex don't race with a write.
func TestConcurrentConnectClientAccess(t *testing.T) {
s := newTestServer()
ctx := context.Background()
client := newDummyConnectClient(ctx)
var wg sync.WaitGroup
nilCount := 0
setCount := 0
var mu sync.Mutex
// Start readers
for i := 0; i < 50; i++ {
wg.Add(1)
go func() {
defer wg.Done()
s.mutex.Lock()
c := s.connectClient
s.mutex.Unlock()
mu.Lock()
defer mu.Unlock()
if c == nil {
nilCount++
} else {
setCount++
}
}()
}
// Simulate connect() writing under mutex
time.Sleep(5 * time.Millisecond)
s.mutex.Lock()
s.connectClient = client
s.mutex.Unlock()
wg.Wait()
assert.Equal(t, 50, nilCount+setCount, "all goroutines should complete without panic")
}
// TestCleanupConnection_ClearsConnectClient validates that cleanupConnection
// properly nils out connectClient.
func TestCleanupConnection_ClearsConnectClient(t *testing.T) {
s := newTestServer()
_, cancel := context.WithCancel(context.Background())
s.actCancel = cancel
s.connectClient = newDummyConnectClient(context.Background())
s.clientRunning = true
err := s.cleanupConnection()
require.NoError(t, err)
assert.Nil(t, s.connectClient, "connectClient should be nil after cleanup")
}
// TestCleanState_NilConnectClient validates that CleanState doesn't panic
// when connectClient is nil.
func TestCleanState_NilConnectClient(t *testing.T) {
s := newTestServer()
s.connectClient = nil
s.profileManager = nil // will cause error if it tries to proceed past the nil check
// Should not panic — the nil check should prevent calling Status() on nil
assert.NotPanics(t, func() {
_, _ = s.CleanState(context.Background(), &proto.CleanStateRequest{All: true})
})
}
// TestDeleteState_NilConnectClient validates that DeleteState doesn't panic
// when connectClient is nil.
func TestDeleteState_NilConnectClient(t *testing.T) {
s := newTestServer()
s.connectClient = nil
s.profileManager = nil
assert.NotPanics(t, func() {
_, _ = s.DeleteState(context.Background(), &proto.DeleteStateRequest{All: true})
})
}
// TestDownThenUp_StaleRunningChan documents the known state issue where
// clientRunningChan from a previous connection is already closed, causing
// waitForUp() to return immediately on reconnect.
//
// The test hand-crafts the post-connect server state (running flag set,
// running channel already closed), performs the Down()-side cleanup, and then
// asserts that waitForUp() succeeds even though connectClient is nil — i.e.
// it documents the stale-channel behavior rather than asserting a fix.
func TestDownThenUp_StaleRunningChan(t *testing.T) {
	s := newTestServer()
	// Simulate state after a successful connection
	s.clientRunning = true
	s.clientRunningChan = make(chan struct{})
	close(s.clientRunningChan) // closed when engine started
	s.clientGiveUpChan = make(chan struct{})
	s.connectClient = newDummyConnectClient(context.Background())
	_, cancel := context.WithCancel(context.Background())
	s.actCancel = cancel
	// Simulate Down(): cleanupConnection sets connectClient = nil
	s.mutex.Lock()
	err := s.cleanupConnection()
	s.mutex.Unlock()
	require.NoError(t, err)
	// After cleanup: connectClient is nil, clientRunning still true
	// (goroutine hasn't exited yet)
	s.mutex.Lock()
	assert.Nil(t, s.connectClient, "connectClient should be nil after cleanup")
	assert.True(t, s.clientRunning, "clientRunning still true until goroutine exits")
	s.mutex.Unlock()
	// waitForUp() returns immediately due to stale closed clientRunningChan
	ctx, ctxCancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer ctxCancel()
	waitDone := make(chan error, 1)
	go func() {
		_, err := s.waitForUp(ctx)
		waitDone <- err
	}()
	select {
	case err := <-waitDone:
		assert.NoError(t, err, "waitForUp returns success on stale channel")
		// But connectClient is still nil — this is the stale state issue
		s.mutex.Lock()
		assert.Nil(t, s.connectClient, "connectClient is nil despite waitForUp success")
		s.mutex.Unlock()
	case <-time.After(1 * time.Second):
		t.Fatal("waitForUp should have returned immediately due to stale closed channel")
	}
}
// TestConnectClient_EngineNilOnFreshClient validates that a newly created
// ConnectClient has nil Engine (before Run is called).
func TestConnectClient_EngineNilOnFreshClient(t *testing.T) {
	fresh := newDummyConnectClient(context.Background())
	assert.Nil(t, fresh.Engine(), "engine should be nil on fresh ConnectClient")
}

View File

@@ -113,7 +113,7 @@ func TestConnectWithRetryRuns(t *testing.T) {
t.Setenv(maxRetryTimeVar, "5s")
t.Setenv(retryMultiplierVar, "1")
s.connectWithRetryRuns(ctx, config, s.statusRecorder, false, nil, nil)
s.connectWithRetryRuns(ctx, config, s.statusRecorder, nil, nil)
if counter < 3 {
t.Fatalf("expected counter > 2, got %d", counter)
}

View File

@@ -39,7 +39,7 @@ func (s *Server) ListStates(_ context.Context, _ *proto.ListStatesRequest) (*pro
// CleanState handles cleaning of states (performing cleanup operations)
func (s *Server) CleanState(ctx context.Context, req *proto.CleanStateRequest) (*proto.CleanStateResponse, error) {
if s.connectClient.Status() == internal.StatusConnected || s.connectClient.Status() == internal.StatusConnecting {
if s.connectClient != nil && (s.connectClient.Status() == internal.StatusConnected || s.connectClient.Status() == internal.StatusConnecting) {
return nil, status.Errorf(codes.FailedPrecondition, "cannot clean state while connecting or connected, run 'netbird down' first.")
}
@@ -82,7 +82,7 @@ func (s *Server) CleanState(ctx context.Context, req *proto.CleanStateRequest) (
// DeleteState handles deletion of states without cleanup
func (s *Server) DeleteState(ctx context.Context, req *proto.DeleteStateRequest) (*proto.DeleteStateResponse, error) {
if s.connectClient.Status() == internal.StatusConnected || s.connectClient.Status() == internal.StatusConnecting {
if s.connectClient != nil && (s.connectClient.Status() == internal.StatusConnected || s.connectClient.Status() == internal.StatusConnecting) {
return nil, status.Errorf(codes.FailedPrecondition, "cannot clean state while connecting or connected, run 'netbird down' first.")
}

View File

@@ -0,0 +1,24 @@
package server
import (
"context"
log "github.com/sirupsen/logrus"
"github.com/netbirdio/netbird/client/proto"
)
// TriggerUpdate initiates installation of the pending enforced version.
// It is called when the user clicks the install button in the UI (Mode 2 / enforced update).
// Failures are reported inside the response (Success=false + ErrorMsg) rather
// than as a gRPC error so the UI can surface the message directly.
func (s *Server) TriggerUpdate(ctx context.Context, _ *proto.TriggerUpdateRequest) (*proto.TriggerUpdateResponse, error) {
	if s.updateManager == nil {
		return &proto.TriggerUpdateResponse{Success: false, ErrorMsg: "update manager not available"}, nil
	}
	err := s.updateManager.Install(ctx)
	if err == nil {
		return &proto.TriggerUpdateResponse{Success: true}, nil
	}
	log.Warnf("TriggerUpdate failed: %v", err)
	return &proto.TriggerUpdateResponse{Success: false, ErrorMsg: err.Error()}, nil
}

View File

@@ -5,7 +5,7 @@ import (
log "github.com/sirupsen/logrus"
"github.com/netbirdio/netbird/client/internal/updatemanager/installer"
"github.com/netbirdio/netbird/client/internal/updater/installer"
"github.com/netbirdio/netbird/client/proto"
)

View File

@@ -0,0 +1,24 @@
//go:build cgo && !osusergo && !windows
package server
import "os/user"
// lookupWithGetent with CGO delegates directly to os/user.Lookup.
// When CGO is enabled, os/user uses libc (getpwnam_r) which goes through
// the NSS stack natively. If it fails, the user truly doesn't exist and
// getent would also fail, so no subprocess fallback is needed in this build.
func lookupWithGetent(username string) (*user.User, error) {
	return user.Lookup(username)
}
// currentUserWithGetent with CGO delegates directly to os/user.Current,
// which resolves the running user through libc/NSS natively.
func currentUserWithGetent() (*user.User, error) {
	return user.Current()
}
// groupIdsWithFallback with CGO delegates directly to user.GroupIds.
// libc's getgrouplist handles NSS groups natively, so the `id -G`
// subprocess fallback used in non-CGO builds is unnecessary here.
func groupIdsWithFallback(u *user.User) ([]string, error) {
	return u.GroupIds()
}

View File

@@ -0,0 +1,74 @@
//go:build (!cgo || osusergo) && !windows
package server
import (
"os"
"os/user"
"strconv"
log "github.com/sirupsen/logrus"
)
// lookupWithGetent looks up a user by name, falling back to getent if os/user fails.
// Without CGO, os/user only reads /etc/passwd and misses NSS-provided users;
// getent goes through the host's NSS stack. On a double failure the original
// os/user error is returned, not the getent one.
func lookupWithGetent(username string) (*user.User, error) {
	resolved, lookupErr := user.Lookup(username)
	if lookupErr == nil {
		return resolved, nil
	}
	log.Debugf("os/user.Lookup(%q) failed, trying getent: %v", username, lookupErr)
	fallback, _, getentErr := runGetent(username)
	if getentErr != nil {
		log.Debugf("getent fallback for %q also failed: %v", username, getentErr)
		return nil, lookupErr
	}
	return fallback, nil
}
// currentUserWithGetent gets the current user, falling back to a getent
// lookup by numeric UID if os/user fails (e.g. NSS-only users in
// CGO_ENABLED=0 builds). On a double failure the os/user error is returned.
func currentUserWithGetent() (*user.User, error) {
	current, stdlibErr := user.Current()
	if stdlibErr == nil {
		return current, nil
	}
	uid := strconv.Itoa(os.Getuid())
	log.Debugf("os/user.Current() failed, trying getent with UID %s: %v", uid, stdlibErr)
	fromGetent, _, getentErr := runGetent(uid)
	if getentErr != nil {
		return nil, stdlibErr
	}
	return fromGetent, nil
}
// groupIdsWithFallback gets group IDs for a user via the id command first,
// falling back to user.GroupIds().
// NOTE: unlike lookupWithGetent/currentUserWithGetent which try stdlib first,
// this intentionally tries `id -G` first because without CGO, user.GroupIds()
// only reads /etc/group and silently returns incomplete results for NSS users
// (no error, just missing groups). The id command goes through NSS and returns
// the full set.
func groupIdsWithFallback(u *user.User) ([]string, error) {
	fromID, idErr := runIdGroups(u.Username)
	if idErr == nil {
		return fromID, nil
	}
	log.Debugf("id -G %q failed, falling back to user.GroupIds(): %v", u.Username, idErr)

	fromStdlib, stdErr := u.GroupIds()
	if stdErr != nil {
		return nil, stdErr
	}
	return fromStdlib, nil
}

View File

@@ -0,0 +1,172 @@
package server
import (
"os/user"
"runtime"
"strconv"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestLookupWithGetent_CurrentUser checks that the running user resolves via
// lookupWithGetent with identity fields matching os/user.
func TestLookupWithGetent_CurrentUser(t *testing.T) {
	// The current user should always be resolvable on any platform
	me, err := user.Current()
	require.NoError(t, err)

	resolved, err := lookupWithGetent(me.Username)
	require.NoError(t, err)
	assert.Equal(t, me.Username, resolved.Username)
	assert.Equal(t, me.Uid, resolved.Uid)
	assert.Equal(t, me.Gid, resolved.Gid)
}
// TestLookupWithGetent_NonexistentUser checks that an unknown username yields
// an error rather than a bogus user.
func TestLookupWithGetent_NonexistentUser(t *testing.T) {
	_, lookupErr := lookupWithGetent("nonexistent_user_xyzzy_12345")
	require.Error(t, lookupErr, "should fail for nonexistent user")
}
// TestCurrentUserWithGetent checks that the helper agrees with os/user for
// the running user.
func TestCurrentUserWithGetent(t *testing.T) {
	fromStdlib, err := user.Current()
	require.NoError(t, err)

	fromHelper, err := currentUserWithGetent()
	require.NoError(t, err)
	assert.Equal(t, fromStdlib.Uid, fromHelper.Uid)
	assert.Equal(t, fromStdlib.Username, fromHelper.Username)
}
// TestGroupIdsWithFallback_CurrentUser checks that the current user's groups
// resolve and, on Unix, that every ID is a numeric GID.
func TestGroupIdsWithFallback_CurrentUser(t *testing.T) {
	me, err := user.Current()
	require.NoError(t, err)

	groups, err := groupIdsWithFallback(me)
	require.NoError(t, err)
	require.NotEmpty(t, groups, "current user should have at least one group")

	if runtime.GOOS == "windows" {
		return // Windows group IDs are SIDs, not numeric
	}
	for _, gid := range groups {
		_, parseErr := strconv.ParseUint(gid, 10, 32)
		assert.NoError(t, parseErr, "group ID %q should be a valid uint32", gid)
	}
}
// TestGetShellFromGetent_CurrentUser checks shell resolution for the current
// user. The result is environment-dependent: the Windows stub must return "",
// and on Unix an empty result is tolerated because getent may not exist.
func TestGetShellFromGetent_CurrentUser(t *testing.T) {
	if runtime.GOOS == "windows" {
		// Windows stub always returns empty, which is correct
		shell := getShellFromGetent("1000")
		assert.Empty(t, shell, "Windows stub should return empty")
		return
	}
	current, err := user.Current()
	require.NoError(t, err)
	// getent may not be available on all systems (e.g., macOS without Homebrew getent)
	shell := getShellFromGetent(current.Uid)
	if shell == "" {
		t.Log("getShellFromGetent returned empty, getent may not be available")
		return
	}
	assert.True(t, shell[0] == '/', "shell should be an absolute path, got %q", shell)
}
// TestLookupWithGetent_RootUser checks that root, when present, resolves to
// UID 0. Skipped on Windows and on systems without a root account.
func TestLookupWithGetent_RootUser(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("no root user on Windows")
	}
	rootUser, err := lookupWithGetent("root")
	if err != nil {
		t.Skip("root user not available on this system")
	}
	assert.Equal(t, "0", rootUser.Uid, "root should have UID 0")
}
// TestIntegration_FullLookupChain exercises the complete user lookup chain
// against the real system, testing that all wrappers (lookupWithGetent,
// currentUserWithGetent, groupIdsWithFallback, getShellFromGetent) produce
// consistent and correct results when composed together.
func TestIntegration_FullLookupChain(t *testing.T) {
	// Step 1: currentUserWithGetent must resolve the running user.
	current, err := currentUserWithGetent()
	require.NoError(t, err, "currentUserWithGetent must resolve the running user")
	require.NotEmpty(t, current.Uid)
	require.NotEmpty(t, current.Username)
	// Step 2: lookupWithGetent by the same username must return matching identity.
	byName, err := lookupWithGetent(current.Username)
	require.NoError(t, err)
	assert.Equal(t, current.Uid, byName.Uid, "lookup by name should return same UID")
	assert.Equal(t, current.Gid, byName.Gid, "lookup by name should return same GID")
	assert.Equal(t, current.HomeDir, byName.HomeDir, "lookup by name should return same home")
	// Step 3: groupIdsWithFallback must return at least the primary GID.
	groups, err := groupIdsWithFallback(current)
	require.NoError(t, err)
	require.NotEmpty(t, groups, "user must have at least one group")
	foundPrimary := false
	for _, gid := range groups {
		// On Windows group IDs are SIDs, so the numeric check is Unix-only.
		if runtime.GOOS != "windows" {
			_, err := strconv.ParseUint(gid, 10, 32)
			require.NoError(t, err, "group ID %q must be a valid uint32", gid)
		}
		if gid == current.Gid {
			foundPrimary = true
		}
	}
	assert.True(t, foundPrimary, "primary GID %s should appear in supplementary groups", current.Gid)
	// Step 4: getShellFromGetent should either return a valid shell path or empty
	// (empty is OK when getent is not available, e.g. macOS without Homebrew getent).
	if runtime.GOOS != "windows" {
		shell := getShellFromGetent(current.Uid)
		if shell != "" {
			assert.True(t, shell[0] == '/', "shell should be an absolute path, got %q", shell)
		}
	}
}
// TestIntegration_LookupAndGroupsConsistency verifies that a user resolved via
// lookupWithGetent can have their groups resolved via groupIdsWithFallback,
// testing the handoff between the two functions as used by the SSH server.
func TestIntegration_LookupAndGroupsConsistency(t *testing.T) {
	current, err := user.Current()
	require.NoError(t, err)
	// Simulate the SSH server flow: lookup user, then get their groups.
	resolved, err := lookupWithGetent(current.Username)
	require.NoError(t, err)
	groups, err := groupIdsWithFallback(resolved)
	require.NoError(t, err)
	require.NotEmpty(t, groups, "resolved user must have groups")
	// On Unix, all returned GIDs must be valid numeric values.
	// On Windows, group IDs are SIDs (e.g., "S-1-5-32-544"), so skip the check.
	if runtime.GOOS != "windows" {
		for _, gid := range groups {
			_, err := strconv.ParseUint(gid, 10, 32)
			assert.NoError(t, err, "group ID %q should be numeric", gid)
		}
	}
}
// TestIntegration_ShellLookupChain tests the full shell resolution chain
// (getShellFromPasswd -> getShellFromGetent -> $SHELL -> default) on Unix.
func TestIntegration_ShellLookupChain(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("Unix shell lookup not applicable on Windows")
	}
	me, err := user.Current()
	require.NoError(t, err)

	// getUserShell is the top-level function used by the SSH server.
	resolvedShell := getUserShell(me.Uid)
	require.NotEmpty(t, resolvedShell, "getUserShell must always return a shell")
	assert.True(t, resolvedShell[0] == '/', "shell should be an absolute path, got %q", resolvedShell)
}

View File

@@ -0,0 +1,122 @@
//go:build !windows
package server
import (
"context"
"fmt"
"os/exec"
"os/user"
"runtime"
"strings"
"time"
)
const getentTimeout = 5 * time.Second
// getShellFromGetent gets a user's login shell via getent by UID, returning
// "" when the lookup fails. This is needed even with CGO because
// getShellFromPasswd reads /etc/passwd directly and won't find NSS-provided
// users there.
func getShellFromGetent(userID string) string {
	if _, shell, err := runGetent(userID); err == nil {
		return shell
	}
	return ""
}
// runGetent executes `getent passwd <query>` and returns the user and login shell.
// The query (username or numeric UID) is validated first so that only
// shell-safe strings reach the subprocess, and the command is bounded by
// getentTimeout to avoid hanging on slow NSS backends.
func runGetent(query string) (*user.User, string, error) {
	if !validateGetentInput(query) {
		return nil, "", fmt.Errorf("invalid getent input: %q", query)
	}
	ctx, cancel := context.WithTimeout(context.Background(), getentTimeout)
	defer cancel()
	out, err := exec.CommandContext(ctx, "getent", "passwd", query).Output()
	if err != nil {
		return nil, "", fmt.Errorf("getent passwd %s: %w", query, err)
	}
	return parseGetentPasswd(string(out))
}
// parseGetentPasswd parses getent passwd output: "name:x:uid:gid:gecos:home:shell".
//
// Only the first line of the output is parsed: a getent query for a single
// key returns one entry, and splitting the raw output on ':' across newlines
// would otherwise fold any extra lines into the home/shell fields.
// Username (field 0), UID (field 2) and GID (field 3) are mandatory; the
// shell (field 6) is optional and returned as "" when absent.
func parseGetentPasswd(output string) (*user.User, string, error) {
	line, _, _ := strings.Cut(strings.TrimSpace(output), "\n")
	fields := strings.SplitN(line, ":", 8)
	if len(fields) < 6 {
		return nil, "", fmt.Errorf("unexpected getent output (need 6+ fields): %q", output)
	}
	if fields[0] == "" || fields[2] == "" || fields[3] == "" {
		return nil, "", fmt.Errorf("missing required fields in getent output: %q", output)
	}
	var shell string
	if len(fields) >= 7 {
		shell = fields[6]
	}
	return &user.User{
		Username: fields[0],
		Uid:      fields[2],
		Gid:      fields[3],
		Name:     fields[4],
		HomeDir:  fields[5],
	}, shell, nil
}
// validateGetentInput checks that the input is safe to pass to getent or id.
// Allows POSIX usernames, numeric UIDs, and common NSS extensions
// (@ for Kerberos, $ for Samba, + for NIS compat). Length is capped at 256
// on Linux and 32 elsewhere.
func validateGetentInput(input string) bool {
	limit := 32
	if runtime.GOOS == "linux" {
		limit = 256
	}
	if input == "" || len(input) > limit {
		return false
	}
	for _, r := range input {
		switch {
		case r >= 'a' && r <= 'z', r >= 'A' && r <= 'Z', r >= '0' && r <= '9':
			// ASCII alphanumerics are always fine.
		case r == '.' || r == '_' || r == '-' || r == '@' || r == '+' || r == '$':
			// Allowed punctuation set.
		default:
			return false
		}
	}
	return true
}
// isAllowedGetentChar reports whether r may appear in a getent/id query:
// ASCII letters, digits, and the punctuation set "._-@+$".
func isAllowedGetentChar(r rune) bool {
	if ('a' <= r && r <= 'z') || ('A' <= r && r <= 'Z') || ('0' <= r && r <= '9') {
		return true
	}
	return strings.ContainsRune("._-@+$", r)
}
// runIdGroups runs `id -G <username>` and returns the space-separated group IDs.
// The username is validated first so only shell-safe strings reach the
// subprocess, and the command is bounded by getentTimeout.
func runIdGroups(username string) ([]string, error) {
	if !validateGetentInput(username) {
		return nil, fmt.Errorf("invalid username for id command: %q", username)
	}
	ctx, cancel := context.WithTimeout(context.Background(), getentTimeout)
	defer cancel()
	raw, err := exec.CommandContext(ctx, "id", "-G", username).Output()
	if err != nil {
		return nil, fmt.Errorf("id -G %s: %w", username, err)
	}
	groups := strings.Fields(strings.TrimSpace(string(raw)))
	if len(groups) == 0 {
		return nil, fmt.Errorf("id -G %s: empty output", username)
	}
	return groups, nil
}

View File

@@ -0,0 +1,410 @@
//go:build !windows
package server
import (
	"os/exec"
	"os/user"
	"runtime"
	"strconv"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)
// TestParseGetentPasswd table-tests the passwd-line parser against valid
// 6- and 7-field entries (including GECOS fields containing commas and an
// empty GECOS) and against malformed input that must produce an error.
func TestParseGetentPasswd(t *testing.T) {
	tests := []struct {
		name        string
		input       string
		wantUser    *user.User
		wantShell   string
		wantErr     bool
		errContains string
	}{
		{
			name:  "standard entry",
			input: "alice:x:1001:1001:Alice Smith:/home/alice:/bin/bash\n",
			wantUser: &user.User{
				Username: "alice",
				Uid:      "1001",
				Gid:      "1001",
				Name:     "Alice Smith",
				HomeDir:  "/home/alice",
			},
			wantShell: "/bin/bash",
		},
		{
			name:  "root entry",
			input: "root:x:0:0:root:/root:/bin/bash",
			wantUser: &user.User{
				Username: "root",
				Uid:      "0",
				Gid:      "0",
				Name:     "root",
				HomeDir:  "/root",
			},
			wantShell: "/bin/bash",
		},
		{
			name:  "empty gecos field",
			input: "svc:x:999:999::/var/lib/svc:/usr/sbin/nologin",
			wantUser: &user.User{
				Username: "svc",
				Uid:      "999",
				Gid:      "999",
				Name:     "",
				HomeDir:  "/var/lib/svc",
			},
			wantShell: "/usr/sbin/nologin",
		},
		{
			// GECOS may contain commas; they must not be treated as separators.
			name:  "gecos with commas",
			input: "john:x:1002:1002:John Doe,Room 101,555-1234,555-4321:/home/john:/bin/zsh",
			wantUser: &user.User{
				Username: "john",
				Uid:      "1002",
				Gid:      "1002",
				Name:     "John Doe,Room 101,555-1234,555-4321",
				HomeDir:  "/home/john",
			},
			wantShell: "/bin/zsh",
		},
		{
			name:  "remote user with large UID",
			input: "remoteuser:*:50001:50001:Remote User:/home/remoteuser:/bin/bash\n",
			wantUser: &user.User{
				Username: "remoteuser",
				Uid:      "50001",
				Gid:      "50001",
				Name:     "Remote User",
				HomeDir:  "/home/remoteuser",
			},
			wantShell: "/bin/bash",
		},
		{
			// The shell field is optional; a 6-field line yields shell == "".
			name:  "no shell field (only 6 fields)",
			input: "minimal:x:1000:1000::/home/minimal",
			wantUser: &user.User{
				Username: "minimal",
				Uid:      "1000",
				Gid:      "1000",
				Name:     "",
				HomeDir:  "/home/minimal",
			},
			wantShell: "",
		},
		{
			name:        "too few fields",
			input:       "bad:x:1000",
			wantErr:     true,
			errContains: "need 6+ fields",
		},
		{
			name:        "empty username",
			input:       ":x:1000:1000::/home/test:/bin/bash",
			wantErr:     true,
			errContains: "missing required fields",
		},
		{
			name:        "empty UID",
			input:       "test:x::1000::/home/test:/bin/bash",
			wantErr:     true,
			errContains: "missing required fields",
		},
		{
			name:        "empty GID",
			input:       "test:x:1000:::/home/test:/bin/bash",
			wantErr:     true,
			errContains: "missing required fields",
		},
		{
			name:        "empty input",
			input:       "",
			wantErr:     true,
			errContains: "need 6+ fields",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			u, shell, err := parseGetentPasswd(tt.input)
			if tt.wantErr {
				require.Error(t, err)
				if tt.errContains != "" {
					assert.Contains(t, err.Error(), tt.errContains)
				}
				return
			}
			require.NoError(t, err)
			assert.Equal(t, tt.wantUser.Username, u.Username, "username")
			assert.Equal(t, tt.wantUser.Uid, u.Uid, "UID")
			assert.Equal(t, tt.wantUser.Gid, u.Gid, "GID")
			assert.Equal(t, tt.wantUser.Name, u.Name, "name/gecos")
			assert.Equal(t, tt.wantUser.HomeDir, u.HomeDir, "home directory")
			assert.Equal(t, tt.wantShell, shell, "shell")
		})
	}
}
// TestValidateGetentInput table-tests the query validator: POSIX usernames,
// numeric UIDs and NSS extensions are accepted; control characters, shell
// metacharacters and over-length inputs are rejected. The length cases are
// GOOS-dependent because the limit is 256 on Linux and 32 elsewhere.
func TestValidateGetentInput(t *testing.T) {
	tests := []struct {
		name  string
		input string
		want  bool
	}{
		{"normal username", "alice", true},
		{"numeric UID", "1001", true},
		{"dots and underscores", "alice.bob_test", true},
		{"hyphen", "alice-bob", true},
		{"kerberos principal", "user@REALM", true},
		{"samba machine account", "MACHINE$", true},
		{"NIS compat", "+user", true},
		{"empty", "", false},
		{"null byte", "alice\x00bob", false},
		{"newline", "alice\nbob", false},
		{"tab", "alice\tbob", false},
		{"control char", "alice\x01bob", false},
		{"DEL char", "alice\x7fbob", false},
		{"space rejected", "alice bob", false},
		{"semicolon rejected", "alice;bob", false},
		{"backtick rejected", "alice`bob", false},
		{"pipe rejected", "alice|bob", false},
		// Expected value depends on the platform length limit (see validator).
		{"33 chars exceeds non-linux max", makeLongString(33), runtime.GOOS == "linux"},
		{"256 chars at linux max", makeLongString(256), runtime.GOOS == "linux"},
		{"257 chars exceeds all limits", makeLongString(257), false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			assert.Equal(t, tt.want, validateGetentInput(tt.input))
		})
	}
}
// makeLongString returns a string of n 'a' characters. It is used by the
// validator tests to probe platform-specific length limits.
func makeLongString(n int) string {
	return strings.Repeat("a", n)
}
// TestRunGetent_RootUser looks up root via the real getent binary and checks
// identity fields. Skipped when getent is not installed on the host.
func TestRunGetent_RootUser(t *testing.T) {
	if _, err := exec.LookPath("getent"); err != nil {
		t.Skip("getent not available on this system")
	}
	u, shell, err := runGetent("root")
	require.NoError(t, err)
	assert.Equal(t, "root", u.Username)
	assert.Equal(t, "0", u.Uid)
	assert.Equal(t, "0", u.Gid)
	assert.NotEmpty(t, shell, "root should have a shell")
}
// TestRunGetent_ByUID checks that a numeric UID query ("0") resolves to root.
func TestRunGetent_ByUID(t *testing.T) {
	if _, err := exec.LookPath("getent"); err != nil {
		t.Skip("getent not available on this system")
	}
	u, _, err := runGetent("0")
	require.NoError(t, err)
	assert.Equal(t, "root", u.Username)
	assert.Equal(t, "0", u.Uid)
}
// TestRunGetent_NonexistentUser checks that an unknown user yields an error
// from the getent subprocess path.
func TestRunGetent_NonexistentUser(t *testing.T) {
	if _, err := exec.LookPath("getent"); err != nil {
		t.Skip("getent not available on this system")
	}
	_, _, err := runGetent("nonexistent_user_xyzzy_12345")
	assert.Error(t, err)
}
// TestRunGetent_InvalidInput checks that unsafe queries are rejected by the
// validator before any subprocess is spawned.
func TestRunGetent_InvalidInput(t *testing.T) {
	_, _, err := runGetent("")
	assert.Error(t, err)
	_, _, err = runGetent("user\x00name")
	assert.Error(t, err)
}
// TestRunGetent_NotAvailable checks the error path on hosts WITHOUT getent;
// it is skipped everywhere the binary exists.
func TestRunGetent_NotAvailable(t *testing.T) {
	if _, err := exec.LookPath("getent"); err == nil {
		t.Skip("getent is available, can't test missing case")
	}
	_, _, err := runGetent("root")
	assert.Error(t, err, "should fail when getent is not installed")
}
// TestRunIdGroups_CurrentUser checks that `id -G` for the current user
// returns at least one numeric GID. Skipped when id is not installed.
func TestRunIdGroups_CurrentUser(t *testing.T) {
	if _, err := exec.LookPath("id"); err != nil {
		t.Skip("id not available on this system")
	}
	current, err := user.Current()
	require.NoError(t, err)
	groups, err := runIdGroups(current.Username)
	require.NoError(t, err)
	require.NotEmpty(t, groups, "current user should have at least one group")
	for _, gid := range groups {
		_, err := strconv.ParseUint(gid, 10, 32)
		assert.NoError(t, err, "group ID %q should be a valid uint32", gid)
	}
}
// TestRunIdGroups_NonexistentUser checks that `id -G` for an unknown user
// surfaces an error.
func TestRunIdGroups_NonexistentUser(t *testing.T) {
	if _, err := exec.LookPath("id"); err != nil {
		t.Skip("id not available on this system")
	}
	_, err := runIdGroups("nonexistent_user_xyzzy_12345")
	assert.Error(t, err)
}
// TestRunIdGroups_InvalidInput checks that unsafe usernames are rejected by
// the validator before any subprocess is spawned.
func TestRunIdGroups_InvalidInput(t *testing.T) {
	_, err := runIdGroups("")
	assert.Error(t, err)
	_, err = runIdGroups("user\x00name")
	assert.Error(t, err)
}
// TestGetentResultsMatchStdlib cross-checks getent's view of the current user
// against os/user to catch parser or field-ordering regressions.
func TestGetentResultsMatchStdlib(t *testing.T) {
	if _, err := exec.LookPath("getent"); err != nil {
		t.Skip("getent not available on this system")
	}
	current, err := user.Current()
	require.NoError(t, err)
	getentUser, _, err := runGetent(current.Username)
	require.NoError(t, err)
	assert.Equal(t, current.Username, getentUser.Username, "username should match")
	assert.Equal(t, current.Uid, getentUser.Uid, "UID should match")
	assert.Equal(t, current.Gid, getentUser.Gid, "GID should match")
	assert.Equal(t, current.HomeDir, getentUser.HomeDir, "home directory should match")
}
// TestGetentResultsMatchStdlib_ByUID repeats the stdlib cross-check, but
// querying getent by numeric UID instead of username.
func TestGetentResultsMatchStdlib_ByUID(t *testing.T) {
	if _, err := exec.LookPath("getent"); err != nil {
		t.Skip("getent not available on this system")
	}
	current, err := user.Current()
	require.NoError(t, err)
	getentUser, _, err := runGetent(current.Uid)
	require.NoError(t, err)
	assert.Equal(t, current.Username, getentUser.Username, "username should match when looked up by UID")
	assert.Equal(t, current.Uid, getentUser.Uid, "UID should match")
}
// TestIdGroupsMatchStdlib compares `id -G` output against user.GroupIds()
// as sets. Skipped when id is missing or when GroupIds itself fails
// (typical of CGO_ENABLED=0 builds).
func TestIdGroupsMatchStdlib(t *testing.T) {
	if _, err := exec.LookPath("id"); err != nil {
		t.Skip("id not available on this system")
	}
	current, err := user.Current()
	require.NoError(t, err)
	stdGroups, err := current.GroupIds()
	if err != nil {
		t.Skip("os/user.GroupIds() not working, likely CGO_ENABLED=0")
	}
	idGroups, err := runIdGroups(current.Username)
	require.NoError(t, err)
	// Deduplicate both lists: id -G can return duplicates (e.g., root in Docker)
	// and ElementsMatch treats duplicates as distinct.
	assert.ElementsMatch(t, uniqueStrings(stdGroups), uniqueStrings(idGroups), "id -G should return same groups as os/user")
}
// uniqueStrings returns ss with duplicates removed, preserving the order of
// first occurrence.
func uniqueStrings(ss []string) []string {
	seen := make(map[string]struct{}, len(ss))
	result := make([]string, 0, len(ss))
	for _, v := range ss {
		if _, dup := seen[v]; dup {
			continue
		}
		seen[v] = struct{}{}
		result = append(result, v)
	}
	return result
}
// TestGetShellFromPasswd_CurrentUser verifies that getShellFromPasswd correctly
// reads the current user's shell from /etc/passwd by comparing it against what
// getent reports (which goes through NSS).
func TestGetShellFromPasswd_CurrentUser(t *testing.T) {
	current, err := user.Current()
	require.NoError(t, err)
	shell := getShellFromPasswd(current.Uid)
	if shell == "" {
		t.Skip("current user not found in /etc/passwd (may be an NSS-only user)")
	}
	assert.True(t, shell[0] == '/', "shell should be an absolute path, got %q", shell)
	// Cross-check against getent only when the binary is present.
	if _, err := exec.LookPath("getent"); err == nil {
		_, getentShell, getentErr := runGetent(current.Uid)
		if getentErr == nil && getentShell != "" {
			assert.Equal(t, getentShell, shell, "shell from /etc/passwd should match getent")
		}
	}
}
// TestGetShellFromPasswd_RootUser verifies that getShellFromPasswd can read
// root's shell from /etc/passwd. Root is guaranteed to be in /etc/passwd on
// any standard Unix system.
func TestGetShellFromPasswd_RootUser(t *testing.T) {
	shell := getShellFromPasswd("0")
	require.NotEmpty(t, shell, "root (UID 0) must be in /etc/passwd")
	assert.True(t, shell[0] == '/', "root shell should be an absolute path, got %q", shell)
}
// TestGetShellFromPasswd_NonexistentUID verifies that getShellFromPasswd
// returns empty for a UID that doesn't exist in /etc/passwd.
func TestGetShellFromPasswd_NonexistentUID(t *testing.T) {
	// 4294967294 (uint32 max - 1) is effectively never allocated.
	shell := getShellFromPasswd("4294967294")
	assert.Empty(t, shell, "nonexistent UID should return empty shell")
}
// TestGetShellFromPasswd_MatchesGetentForKnownUsers reads /etc/passwd directly
// and cross-validates every entry against getent to ensure parseGetentPasswd
// and getShellFromPasswd agree on shell values.
func TestGetShellFromPasswd_MatchesGetentForKnownUsers(t *testing.T) {
	if _, err := exec.LookPath("getent"); err != nil {
		t.Skip("getent not available")
	}
	// Pick a few well-known system UIDs that are virtually always in /etc/passwd.
	uids := []string{"0"} // root
	current, err := user.Current()
	require.NoError(t, err)
	uids = append(uids, current.Uid)
	for _, uid := range uids {
		passwdShell := getShellFromPasswd(uid)
		if passwdShell == "" {
			// Skip NSS-only users that are absent from /etc/passwd.
			continue
		}
		_, getentShell, err := runGetent(uid)
		if err != nil {
			continue
		}
		assert.Equal(t, getentShell, passwdShell, "shell mismatch for UID %s", uid)
	}
}

View File

@@ -0,0 +1,26 @@
//go:build windows
package server
import "os/user"
// lookupWithGetent on Windows just delegates to os/user.Lookup.
// Windows does not use NSS/getent; its user lookup works without CGO.
func lookupWithGetent(username string) (*user.User, error) {
	return user.Lookup(username)
}
// currentUserWithGetent on Windows just delegates to os/user.Current,
// which resolves the running user without NSS involvement.
func currentUserWithGetent() (*user.User, error) {
	return user.Current()
}
// getShellFromGetent is a no-op on Windows; shell resolution uses PowerShell
// detection instead, so this stub always returns the empty string.
func getShellFromGetent(_ string) string {
	return ""
}
// groupIdsWithFallback on Windows just delegates to u.GroupIds(); the
// returned IDs are SIDs (e.g. "S-1-5-32-544"), not numeric GIDs.
func groupIdsWithFallback(u *user.User) ([]string, error) {
	return u.GroupIds()
}

View File

@@ -49,10 +49,14 @@ func getWindowsUserShell() string {
return `C:\Windows\System32\WindowsPowerShell\v1.0\powershell.exe`
}
// getUnixUserShell returns the shell for Unix-like systems
// getUnixUserShell returns the shell for Unix-like systems.
// Tries /etc/passwd first (fast, no subprocess), falls back to getent for NSS users.
func getUnixUserShell(userID string) string {
shell := getShellFromPasswd(userID)
if shell != "" {
if shell := getShellFromPasswd(userID); shell != "" {
return shell
}
if shell := getShellFromGetent(userID); shell != "" {
return shell
}

View File

@@ -23,8 +23,8 @@ func isPlatformUnix() bool {
// Dependency injection variables for testing - allows mocking dynamic runtime checks
var (
getCurrentUser = user.Current
lookupUser = user.Lookup
getCurrentUser = currentUserWithGetent
lookupUser = lookupWithGetent
getCurrentOS = func() string { return runtime.GOOS }
getIsProcessPrivileged = isCurrentProcessPrivileged

View File

@@ -146,32 +146,30 @@ func (s *Server) parseUserCredentials(localUser *user.User) (uint32, uint32, []u
}
gid := uint32(gid64)
groups, err := s.getSupplementaryGroups(localUser.Username)
if err != nil {
log.Warnf("failed to get supplementary groups for user %s: %v", localUser.Username, err)
groups, err := s.getSupplementaryGroups(localUser)
if err != nil || len(groups) == 0 {
if err != nil {
log.Warnf("failed to get supplementary groups for user %s: %v", localUser.Username, err)
}
groups = []uint32{gid}
}
return uid, gid, groups, nil
}
// getSupplementaryGroups retrieves supplementary group IDs for a user
func (s *Server) getSupplementaryGroups(username string) ([]uint32, error) {
u, err := user.Lookup(username)
// getSupplementaryGroups retrieves supplementary group IDs for a user.
// Uses id/getent fallback for NSS users in CGO_ENABLED=0 builds.
func (s *Server) getSupplementaryGroups(u *user.User) ([]uint32, error) {
groupIDStrings, err := groupIdsWithFallback(u)
if err != nil {
return nil, fmt.Errorf("lookup user %s: %w", username, err)
}
groupIDStrings, err := u.GroupIds()
if err != nil {
return nil, fmt.Errorf("get group IDs for user %s: %w", username, err)
return nil, fmt.Errorf("get group IDs for user %s: %w", u.Username, err)
}
groups := make([]uint32, len(groupIDStrings))
for i, gidStr := range groupIDStrings {
gid64, err := strconv.ParseUint(gidStr, 10, 32)
if err != nil {
return nil, fmt.Errorf("invalid group ID %s for user %s: %w", gidStr, username, err)
return nil, fmt.Errorf("invalid group ID %s for user %s: %w", gidStr, u.Username, err)
}
groups[i] = uint32(gid64)
}

View File

@@ -34,7 +34,6 @@ import (
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
protobuf "google.golang.org/protobuf/proto"
"github.com/netbirdio/netbird/client/iface"
"github.com/netbirdio/netbird/client/internal"
@@ -308,10 +307,11 @@ type serviceClient struct {
sshJWTCacheTTL int
connected bool
update *version.Update
daemonVersion string
updateIndicationLock sync.Mutex
isUpdateIconActive bool
isEnforcedUpdate bool
lastNotifiedVersion string
settingsEnabled bool
profilesEnabled bool
showNetworks bool
@@ -323,7 +323,7 @@ type serviceClient struct {
exitNodeMu sync.Mutex
mExitNodeItems []menuHandler
exitNodeStates []exitNodeState
exitNodeRetryCancel context.CancelFunc
mExitNodeDeselectAll *systray.MenuItem
logFile string
wLoginURL fyne.Window
@@ -367,7 +367,6 @@ func newServiceClient(args *newServiceClientArgs) *serviceClient {
showAdvancedSettings: args.showSettings,
showNetworks: args.showNetworks,
update: version.NewUpdateAndStart("nb/client-ui"),
}
s.eventHandler = newEventHandler(s)
@@ -828,7 +827,7 @@ func (s *serviceClient) handleSSOLogin(ctx context.Context, loginResp *proto.Log
return nil
}
func (s *serviceClient) menuUpClick(ctx context.Context, wannaAutoUpdate bool) error {
func (s *serviceClient) menuUpClick(ctx context.Context) error {
systray.SetTemplateIcon(iconConnectingMacOS, s.icConnecting)
conn, err := s.getSrvClient(defaultFailTimeout)
if err != nil {
@@ -850,9 +849,7 @@ func (s *serviceClient) menuUpClick(ctx context.Context, wannaAutoUpdate bool) e
return nil
}
if _, err := s.conn.Up(s.ctx, &proto.UpRequest{
AutoUpdate: protobuf.Bool(wannaAutoUpdate),
}); err != nil {
if _, err := s.conn.Up(s.ctx, &proto.UpRequest{}); err != nil {
return fmt.Errorf("start connection: %w", err)
}
@@ -924,7 +921,7 @@ func (s *serviceClient) updateStatus() error {
s.mDown.Enable()
s.mNetworks.Enable()
s.mExitNode.Enable()
go s.updateExitNodes()
s.startExitNodeRefresh()
systrayIconState = true
case status.Status == string(internal.StatusConnecting):
s.setConnectingStatus()
@@ -933,13 +930,13 @@ func (s *serviceClient) updateStatus() error {
systrayIconState = false
}
// the updater struct notify by the upgrades available only, but if meanwhile the daemon has successfully
// updated must reset the mUpdate visibility state
// if the daemon version changed (e.g. after a successful update), reset the update indication
if s.daemonVersion != status.DaemonVersion {
s.mUpdate.Hide()
if s.daemonVersion != "" {
s.mUpdate.Hide()
s.isUpdateIconActive = false
}
s.daemonVersion = status.DaemonVersion
s.isUpdateIconActive = s.update.SetDaemonVersion(status.DaemonVersion)
if !s.isUpdateIconActive {
if systrayIconState {
systray.SetTemplateIcon(iconConnectedMacOS, s.icConnected)
@@ -985,6 +982,7 @@ func (s *serviceClient) setDisconnectedStatus() {
s.mUp.Enable()
s.mNetworks.Disable()
s.mExitNode.Disable()
s.cancelExitNodeRetry()
go s.updateExitNodes()
}
@@ -1090,7 +1088,6 @@ func (s *serviceClient) onTrayReady() {
// update exit node menu in case service is already connected
go s.updateExitNodes()
s.update.SetOnUpdateListener(s.onUpdateAvailable)
go func() {
s.getSrvConfig()
time.Sleep(100 * time.Millisecond) // To prevent race condition caused by systray not being fully initialized and ignoring setIcon
@@ -1134,6 +1131,13 @@ func (s *serviceClient) onTrayReady() {
}
}
})
s.eventManager.AddHandler(func(event *proto.SystemEvent) {
if newVersion, ok := event.Metadata["new_version_available"]; ok {
_, enforced := event.Metadata["enforced"]
log.Infof("received new_version_available event: version=%s enforced=%v", newVersion, enforced)
s.onUpdateAvailable(newVersion, enforced)
}
})
go s.eventManager.Start(s.ctx)
go s.eventHandler.listen(s.ctx)
@@ -1506,10 +1510,18 @@ func protoConfigToConfig(cfg *proto.GetConfigResponse) *profilemanager.Config {
return &config
}
func (s *serviceClient) onUpdateAvailable() {
func (s *serviceClient) onUpdateAvailable(newVersion string, enforced bool) {
s.updateIndicationLock.Lock()
defer s.updateIndicationLock.Unlock()
s.isEnforcedUpdate = enforced
if enforced {
s.mUpdate.SetTitle("Install version " + newVersion)
} else {
s.lastNotifiedVersion = ""
s.mUpdate.SetTitle("Download latest version")
}
s.mUpdate.Show()
s.isUpdateIconActive = true
@@ -1518,6 +1530,11 @@ func (s *serviceClient) onUpdateAvailable() {
} else {
systray.SetTemplateIcon(iconUpdateDisconnectedMacOS, s.icUpdateDisconnected)
}
if enforced && s.lastNotifiedVersion != newVersion {
s.lastNotifiedVersion = newVersion
s.app.SendNotification(fyne.NewNotification("Update available", "A new version "+newVersion+" is ready to install"))
}
}
// onSessionExpire sends a notification to the user when the session expires.

View File

@@ -107,12 +107,7 @@ func (e *Manager) handleEvent(event *proto.SystemEvent) {
handlers := slices.Clone(e.handlers)
e.mu.Unlock()
// critical events are always shown
if !enabled && event.Severity != proto.SystemEvent_CRITICAL {
return
}
if event.UserMessage != "" {
if event.UserMessage != "" && (enabled || event.Severity == proto.SystemEvent_CRITICAL) {
title := e.getEventTitle(event)
body := event.UserMessage
id := event.Metadata["id"]

View File

@@ -82,7 +82,7 @@ func (h *eventHandler) handleConnectClick() {
go func() {
defer connectCancel()
if err := h.client.menuUpClick(connectCtx, true); err != nil {
if err := h.client.menuUpClick(connectCtx); err != nil {
st, ok := status.FromError(err)
if errors.Is(err, context.Canceled) || (ok && st.Code() == codes.Canceled) {
log.Debugf("connect operation cancelled by user")
@@ -100,8 +100,7 @@ func (h *eventHandler) handleConnectClick() {
func (h *eventHandler) handleDisconnectClick() {
h.client.mDown.Disable()
h.client.exitNodeStates = []exitNodeState{}
h.client.cancelExitNodeRetry()
if h.client.connectCancel != nil {
log.Debugf("cancelling ongoing connect operation")
@@ -212,9 +211,42 @@ func (h *eventHandler) handleGitHubClick() {
}
func (h *eventHandler) handleUpdateClick() {
if err := openURL(version.DownloadUrl()); err != nil {
log.Errorf("failed to open download URL: %v", err)
h.client.updateIndicationLock.Lock()
enforced := h.client.isEnforcedUpdate
h.client.updateIndicationLock.Unlock()
if !enforced {
if err := openURL(version.DownloadUrl()); err != nil {
log.Errorf("failed to open download URL: %v", err)
}
return
}
// prevent blocking against a busy server
h.client.mUpdate.Disable()
go func() {
defer h.client.mUpdate.Enable()
conn, err := h.client.getSrvClient(defaultFailTimeout)
if err != nil {
log.Errorf("failed to get service client for update: %v", err)
_ = openURL(version.DownloadUrl())
return
}
resp, err := conn.TriggerUpdate(h.client.ctx, &proto.TriggerUpdateRequest{})
if err != nil {
log.Errorf("TriggerUpdate failed: %v", err)
_ = openURL(version.DownloadUrl())
return
}
if !resp.Success {
log.Errorf("TriggerUpdate failed: %s", resp.ErrorMsg)
_ = openURL(version.DownloadUrl())
return
}
log.Infof("update triggered via daemon")
}()
}
func (h *eventHandler) handleNetworksClick() {

View File

@@ -6,7 +6,6 @@ import (
"context"
"fmt"
"runtime"
"slices"
"sort"
"strings"
"time"
@@ -34,11 +33,6 @@ const (
type filter string
type exitNodeState struct {
id string
selected bool
}
func (s *serviceClient) showNetworksUI() {
s.wNetworks = s.app.NewWindow("Networks")
s.wNetworks.SetOnClosed(s.cancel)
@@ -335,16 +329,75 @@ func (s *serviceClient) updateNetworksBasedOnDisplayTab(tabs *container.AppTabs,
s.updateNetworks(grid, f)
}
func (s *serviceClient) updateExitNodes() {
// startExitNodeRefresh initiates exit node menu refresh after connecting.
// On Windows, TrayOpenedCh is not supported by the systray library, so we use
// a background poller to keep exit nodes in sync while connected.
// On macOS/Linux, TrayOpenedCh handles refreshes on each tray open.
func (s *serviceClient) startExitNodeRefresh() {
s.cancelExitNodeRetry()
if runtime.GOOS == "windows" {
ctx, cancel := context.WithCancel(s.ctx)
s.exitNodeMu.Lock()
s.exitNodeRetryCancel = cancel
s.exitNodeMu.Unlock()
go s.pollExitNodes(ctx)
} else {
go s.updateExitNodes()
}
}
func (s *serviceClient) cancelExitNodeRetry() {
s.exitNodeMu.Lock()
if s.exitNodeRetryCancel != nil {
s.exitNodeRetryCancel()
s.exitNodeRetryCancel = nil
}
s.exitNodeMu.Unlock()
}
// pollExitNodes periodically refreshes exit nodes while connected.
// Uses a short initial interval to catch routes from the management sync,
// then switches to a longer interval for ongoing updates.
func (s *serviceClient) pollExitNodes(ctx context.Context) {
// Initial fast polling to catch routes as they appear after connect.
for i := 0; i < 5; i++ {
if s.updateExitNodes() {
break
}
select {
case <-ctx.Done():
return
case <-time.After(2 * time.Second):
}
}
ticker := time.NewTicker(10 * time.Second)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
s.updateExitNodes()
}
}
}
// updateExitNodes fetches exit nodes from the daemon and recreates the menu.
// Returns true if exit nodes were found.
func (s *serviceClient) updateExitNodes() bool {
conn, err := s.getSrvClient(defaultFailTimeout)
if err != nil {
log.Errorf("get client: %v", err)
return
return false
}
exitNodes, err := s.getExitNodes(conn)
if err != nil {
log.Errorf("get exit nodes: %v", err)
return
return false
}
s.exitNodeMu.Lock()
@@ -354,28 +407,14 @@ func (s *serviceClient) updateExitNodes() {
if len(s.mExitNodeItems) > 0 {
s.mExitNode.Enable()
} else {
s.mExitNode.Disable()
return true
}
s.mExitNode.Disable()
return false
}
func (s *serviceClient) recreateExitNodeMenu(exitNodes []*proto.Network) {
var exitNodeIDs []exitNodeState
for _, node := range exitNodes {
exitNodeIDs = append(exitNodeIDs, exitNodeState{
id: node.ID,
selected: node.Selected,
})
}
sort.Slice(exitNodeIDs, func(i, j int) bool {
return exitNodeIDs[i].id < exitNodeIDs[j].id
})
if slices.Equal(s.exitNodeStates, exitNodeIDs) {
log.Debug("Exit node menu already up to date")
return
}
for _, node := range s.mExitNodeItems {
node.cancel()
node.Hide()
@@ -413,8 +452,6 @@ func (s *serviceClient) recreateExitNodeMenu(exitNodes []*proto.Network) {
go s.handleChecked(ctx, node.ID, menuItem)
}
s.exitNodeStates = exitNodeIDs
if showDeselectAll {
s.mExitNode.AddSeparator()
deselectAllItem := s.mExitNode.AddSubMenuItem("Deselect All", "Deselect All")

View File

@@ -397,7 +397,7 @@ type profileMenu struct {
logoutSubItem *subItem
profilesState []Profile
downClickCallback func() error
upClickCallback func(context.Context, bool) error
upClickCallback func(context.Context) error
getSrvClientCallback func(timeout time.Duration) (proto.DaemonServiceClient, error)
loadSettingsCallback func()
app fyne.App
@@ -411,7 +411,7 @@ type newProfileMenuArgs struct {
profileMenuItem *systray.MenuItem
emailMenuItem *systray.MenuItem
downClickCallback func() error
upClickCallback func(context.Context, bool) error
upClickCallback func(context.Context) error
getSrvClientCallback func(timeout time.Duration) (proto.DaemonServiceClient, error)
loadSettingsCallback func()
app fyne.App
@@ -579,7 +579,7 @@ func (p *profileMenu) refresh() {
connectCtx, connectCancel := context.WithCancel(p.ctx)
p.serviceClient.connectCancel = connectCancel
if err := p.upClickCallback(connectCtx, false); err != nil {
if err := p.upClickCallback(connectCtx); err != nil {
log.Errorf("failed to handle up click after switching profile: %v", err)
}

View File

@@ -267,7 +267,7 @@ func (s *serviceClient) showQuickActionsUI() {
connCmd := connectCommand{
connectClient: func() error {
return s.menuUpClick(s.ctx, false)
return s.menuUpClick(s.ctx)
},
}