From d841a6aa07c39f4fa1f65cc56902a72a1fce7b6c Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Thu, 14 May 2026 14:51:51 +0200 Subject: [PATCH 1/6] [client] Push status snapshot on every state.Set and classify SSO errors MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Two related daemon-side status-stream fixes that together keep the UI's status in sync with the daemon's contextState: * state.Set previously only mutated the in-memory enum — transitions that weren't accompanied by a Mark{Management,Signal,...} call (e.g. StatusNeedsLogin after a PermissionDenied login, StatusLoginFailed after OAuth init failure, StatusIdle in the Login defer) left the UI stuck on the previous snapshot until an unrelated peer event happened to fire notifyStateChange. Add a callback on contextState fired from Set (outside the mutex, to avoid lock-order issues with the recorder's stateChangeMux), and wire it in Server.Start to the recorder's new public NotifyStateChange. Every state.Set callsite now pushes automatically; new ones don't need to opt in. * WaitSSOLogin's WaitToken error branch lumped every failure into StatusLoginFailed, including context.Canceled aborts from a parallel profile switch (actCancel/waitCancel). That spurious LoginFailed then wedged the new profile's Up RPC with "up already in progress: current status LoginFailed". Split the branch by error type: context.Canceled lets the top-level defer pick StatusIdle, context.DeadlineExceeded sets StatusNeedsLogin (retryable; OAuth device-code window just expired), other errors keep LoginFailed (real auth/IO failures). Document the full state-transition table in the function godoc. 
--- client/internal/peer/status.go | 12 ++++++++ client/internal/state.go | 27 +++++++++++++--- client/server/server.go | 56 ++++++++++++++++++++++++++++++++-- 3 files changed, 87 insertions(+), 8 deletions(-) diff --git a/client/internal/peer/status.go b/client/internal/peer/status.go index a24741e4f..12501a424 100644 --- a/client/internal/peer/status.go +++ b/client/internal/peer/status.go @@ -1281,6 +1281,18 @@ func (d *Status) notifyStateChange() { } } +// NotifyStateChange is the public wake-the-subscribers entry point used by +// callers that mutate state outside the peer recorder — most importantly +// the connect-state machine, which writes StatusNeedsLogin into the +// shared contextState (client/internal/state.go) without touching any +// recorder field. Without this push the SubscribeStatus stream stays on +// the previous snapshot until an unrelated peer/management/signal +// change happens to fire notifyStateChange, leaving the UI's status +// out of sync with the daemon. +func (d *Status) NotifyStateChange() { + d.notifyStateChange() +} + func (d *Status) SetWgIface(wgInterface WGIfaceStatus) { d.mux.Lock() defer d.mux.Unlock() diff --git a/client/internal/state.go b/client/internal/state.go index 20db33ecc..0adfa26e4 100644 --- a/client/internal/state.go +++ b/client/internal/state.go @@ -33,17 +33,34 @@ func CtxGetState(ctx context.Context) *contextState { } type contextState struct { - err error - status StatusType - mutex sync.Mutex + err error + status StatusType + mutex sync.Mutex + onChange func() +} + +// SetOnChange installs a callback fired after every successful Set. Used by +// the daemon to wire the status recorder's notifyStateChange so any +// state.Set in the connect/login paths pushes a fresh snapshot to +// SubscribeStatus subscribers without each callsite having to opt in. +// The callback runs outside the contextState mutex to avoid a lock-order +// dependency with the recorder's stateChangeMux. 
+func (c *contextState) SetOnChange(fn func()) { + c.mutex.Lock() + c.onChange = fn + c.mutex.Unlock() } func (c *contextState) Set(update StatusType) { c.mutex.Lock() - defer c.mutex.Unlock() - c.status = update c.err = nil + cb := c.onChange + c.mutex.Unlock() + + if cb != nil { + cb() + } } func (c *contextState) Status() (StatusType, error) { diff --git a/client/server/server.go b/client/server/server.go index 0bc6358e3..0b42b271d 100644 --- a/client/server/server.go +++ b/client/server/server.go @@ -140,6 +140,15 @@ func (s *Server) Start() error { } state := internal.CtxGetState(s.rootCtx) + // Every contextState.Set in the connect/login/server paths must push a + // SubscribeStatus snapshot, otherwise transitions that don't happen to + // be accompanied by a Mark{Management,Signal,...} call (e.g. plain + // StatusNeedsLogin after a PermissionDenied login, StatusLoginFailed + // after OAuth init failure, StatusIdle in the Login defer) leave the + // UI stuck on the previous status until the next unrelated peer event. + // Binding the recorder here means new state.Set callsites don't have + // to opt in individually. + state.SetOnChange(s.statusRecorder.NotifyStateChange) if err := handlePanicLog(); err != nil { log.Warnf("failed to redirect stderr: %v", err) @@ -571,8 +580,35 @@ func (s *Server) Login(callerCtx context.Context, msg *proto.LoginRequest) (*pro return &proto.LoginResponse{}, nil } -// WaitSSOLogin uses the userCode to validate the TokenInfo and -// waits for the user to continue with the login on a browser +// WaitSSOLogin validates the supplied userCode against the in-flight OAuth +// device/PKCE flow and blocks until the user finishes the browser leg. 
+// +// State transitions on exit: +// +// ┌──────────────────────────────────────────┬──────────────────────────────────┐ +// │ Outcome │ contextState │ +// ├──────────────────────────────────────────┼──────────────────────────────────┤ +// │ Success → loginAttempt → Connected │ StatusConnected (loginAttempt) │ +// │ Success → loginAttempt → still-NeedsLogin│ StatusNeedsLogin (loginAttempt) │ +// │ Success → loginAttempt error │ StatusLoginFailed (loginAttempt) │ +// │ UserCode mismatch │ StatusLoginFailed │ +// │ WaitToken: context.Canceled (external │ defer runs: status untouched if │ +// │ abort — profile switch invokes │ already NeedsLogin/LoginFailed,│ +// │ actCancel/waitCancel, app quit, │ else StatusIdle. Keeps the │ +// │ another WaitSSOLogin started) │ cancel from leaking as a │ +// │ │ spurious LoginFailed on the │ +// │ │ next profile's Up. │ +// │ WaitToken: context.DeadlineExceeded │ StatusNeedsLogin │ +// │ (OAuth device-code window expired │ (retryable; the UI's "Connect" │ +// │ while waiting on the browser leg) │ re-enters the Login flow) │ +// │ WaitToken: any other error │ StatusLoginFailed │ +// │ (access_denied, expired_token, HTTP │ (genuine auth/IO failure; │ +// │ failure, token validation rejection) │ surfaced verbatim to caller) │ +// └──────────────────────────────────────────┴──────────────────────────────────┘ +// +// The defer at the top of the function applies the Idle fallback so callers +// that bypass the explicit Set calls (the Canceled branch above, the success +// path before loginAttempt) still land on a sensible terminal status. 
func (s *Server) WaitSSOLogin(callerCtx context.Context, msg *proto.WaitSSOLoginRequest) (*proto.WaitSSOLoginResponse, error) { s.mutex.Lock() if s.actCancel != nil { @@ -632,7 +668,21 @@ func (s *Server) WaitSSOLogin(callerCtx context.Context, msg *proto.WaitSSOLogin s.mutex.Lock() s.oauthAuthFlow.expiresAt = time.Now() s.mutex.Unlock() - state.Set(internal.StatusLoginFailed) + switch { + case errors.Is(err, context.Canceled): + // External abort (profile switch, app quit, another + // WaitSSOLogin started). Not a login failure — let the + // top-level defer fall through to StatusIdle so the next + // flow starts from a clean state. + case errors.Is(err, context.DeadlineExceeded): + // OAuth device-code window expired with no user action. + // Retryable — leave the daemon in NeedsLogin so the UI + // keeps the Login affordance instead of reading as a + // hard failure. + state.Set(internal.StatusNeedsLogin) + default: + state.Set(internal.StatusLoginFailed) + } log.Errorf("waiting for browser login failed: %v", err) return nil, err } From fc1db63fc3844ac47a67e4bbb2a5da7f1822fcf3 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Thu, 14 May 2026 14:52:03 +0200 Subject: [PATCH 2/6] [client/ui] Fix profile-submenu race, restore Connect re-auth flow MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Three tray fixes after the SubscribeStatus stream refactor: * loadProfiles now serializes via a dedicated profileLoadMu and runs AFTER the SetHidden/SetEnabled writes inside applyStatus's iconChanged block. Previously the status-driven refresh fired before the menu-item writes finished, so processMenu's clearMenu/re-add NSMenu rebuild raced against SetHidden on darwin — the Disconnect entry could end up visible-but-disabled even when applyStatus had requested it hidden. * The status row is no longer a hidden "Login" entry. 
It now renders as a plain enabled label (so the text isn't greyed out) but has no OnClick handler — clicks are no-ops, matching the legacy Fyne UI. All actionable transitions flow through Connect/Disconnect. * handleConnect routes NeedsLogin/SessionExpired/LoginFailed to the frontend's /login route (which already runs Login → WaitSSOLogin → Up) instead of calling Up directly. The plain Up RPC errors with "up already in progress: current status NeedsLogin" in those states; the legacy Fyne UI drove the SSO dance from the Connect button as well. --- client/ui/tray.go | 82 ++++++++++++++++++++++++++++++++++------------- 1 file changed, 60 insertions(+), 22 deletions(-) diff --git a/client/ui/tray.go b/client/ui/tray.go index a861d84ec..1f6ab0d53 100644 --- a/client/ui/tray.go +++ b/client/ui/tray.go @@ -133,6 +133,13 @@ type Tray struct { activeProfile string activeUsername string switchCancel context.CancelFunc + + // profileLoadMu serializes loadProfiles so the daemon-status-driven + // refresh in applyStatus cannot race with the ApplicationStarted seed + // or the post-switchProfile reload — both manipulate profileSubmenu and + // SetMenu, which the Wails menu API is not safe against concurrent + // callers. + profileLoadMu sync.Mutex } func NewTray(app *application.App, window *application.WebviewWindow, svc TrayServices) *Tray { @@ -189,13 +196,14 @@ func (t *Tray) ShowWindow() { func (t *Tray) buildMenu() *application.Menu { menu := application.NewMenu() - // statusItem doubles as the "Login" entry once the daemon reports - // NeedsLogin/SessionExpired — applyStatus toggles its enabled state and - // label. The click handler is harmless while disabled, so we wire it - // up unconditionally rather than swapping items at runtime. + // statusItem shows the daemon's current status. 
Kept enabled so the + // label renders in the normal foreground colour (NSMenuItem greys + // disabled items, which makes the live status read like inactive + // chrome), but no OnClick handler is wired — clicking is a no-op. + // The Connect entry below drives every actionable transition, + // including the SSO re-auth flow for NeedsLogin/SessionExpired + // (the daemon's Up RPC returns NeedsSSOLogin when applicable). t.statusItem = menu.Add(menuStatusDisconnected). - OnClick(func(*application.Context) { t.openRoute("/login") }). - SetEnabled(false). SetBitmap(iconMenuDotIdle) menu.AddSeparator() @@ -275,6 +283,20 @@ func (t *Tray) openRoute(route string) { } func (t *Tray) handleConnect() { + // NeedsLogin/SessionExpired/LoginFailed mean the daemon won't honor a + // plain Up RPC ("up already in progress: current status NeedsLogin") — + // it needs the Login → WaitSSOLogin → Up sequence instead. The + // frontend's /login route already drives that flow, so route the + // click there rather than re-implementing the SSO dance in the tray. 
+ t.mu.Lock() + needsLogin := strings.EqualFold(t.lastStatus, services.StatusNeedsLogin) || + strings.EqualFold(t.lastStatus, services.StatusSessionExpired) || + strings.EqualFold(t.lastStatus, services.StatusLoginFailed) + t.mu.Unlock() + if needsLogin { + t.openRoute("/login") + return + } t.upItem.SetEnabled(false) go func() { ctx, cancel := context.WithCancel(context.Background()) @@ -475,16 +497,13 @@ func (t *Tray) applyStatus(st services.Status) { if iconChanged { t.applyIcon() - needsLogin := strings.EqualFold(st.Status, services.StatusNeedsLogin) || - strings.EqualFold(st.Status, services.StatusSessionExpired) || - strings.EqualFold(st.Status, services.StatusLoginFailed) daemonUnavailable := strings.EqualFold(st.Status, services.StatusDaemonUnavailable) connecting := strings.EqualFold(st.Status, services.StatusConnecting) if t.statusItem != nil { - // When the daemon needs re-authentication the status row turns - // into the actionable Login entry — Connect would only fail. - // When the daemon socket is unreachable, swap the label to make - // the cause obvious; Connect/Disconnect would just fail. + // Label-only: enabled so the text isn't greyed out, but no + // click handler is bound (see buildMenu). Swap the displayed + // text so the user sees a familiar phrase instead of the raw + // daemon enum. label := st.Status switch { case daemonUnavailable: @@ -493,16 +512,19 @@ func (t *Tray) applyStatus(st services.Status) { label = menuStatusDisconnected } t.statusItem.SetLabel(label) - t.statusItem.SetEnabled(needsLogin) + t.statusItem.SetEnabled(true) t.applyStatusIndicator(st.Status) } if t.upItem != nil { - // Hide Connect whenever an Up action would be a no-op or would - // only fail: tunnel already up, daemon mid-connect (Disconnect - // takes over the slot so the user can abort), login required, - // or daemon socket unreachable. 
- t.upItem.SetHidden(connected || connecting || needsLogin || daemonUnavailable) - t.upItem.SetEnabled(!connected && !connecting && !needsLogin && !daemonUnavailable) + // Connect stays visible/clickable in NeedsLogin/SessionExpired/ + // LoginFailed too — handleConnect routes those states to the + // frontend's /login flow (Login → WaitSSOLogin → Up) instead of + // a plain Up RPC, mirroring the legacy Fyne client where the same + // button drove initial and re-login. Hidden only when the action + // would be a no-op (tunnel up, daemon mid-connect — Disconnect + // takes the slot) or would fail uselessly (daemon unreachable). + t.upItem.SetHidden(connected || connecting || daemonUnavailable) + t.upItem.SetEnabled(!connected && !connecting && !daemonUnavailable) } if t.downItem != nil { // Disconnect is the abort path while the daemon is still @@ -526,6 +548,16 @@ func (t *Tray) applyStatus(st services.Status) { if t.debugItem != nil { t.debugItem.SetEnabled(!daemonUnavailable) } + // Refresh the Profiles submenu on every status-text transition: the + // daemon does not emit an active-profile event, so the startup race + // (UI loads profiles before autoconnect picks the persisted profile) + // and a CLI "profile select && up" both surface here. Fired AFTER + // all SetHidden/SetEnabled writes on the static menu items above so + // loadProfiles' SetMenu rebuild (which clearMenu+processMenu the + // entire NSMenu and re-assigns item.impl) cannot race those + // writes — the Wails 3 alpha menu API is not goroutine-safe and + // reads item.disabled/item.hidden at NSMenuItem construction time. + go t.loadProfiles() } if exitNodesChanged { t.rebuildExitNodes(exitNodes) @@ -711,12 +743,18 @@ func (t *Tray) loadConfig() { // loadProfiles refreshes the Profiles submenu from the daemon. Each // entry is a checkbox showing the active profile and switches on click. -// Called once on ApplicationStarted and again after a successful switch -// so the checkmark moves to the new active profile. 
+// Called on ApplicationStarted, after a successful switchProfile, and +// from applyStatus whenever the daemon's status text changes — the +// last case catches profile flips driven by another channel (CLI +// "netbird profile select", autoconnect picking the persisted profile +// after the UI's first ListProfiles, etc.) since the daemon does not +// emit a dedicated active-profile event. func (t *Tray) loadProfiles() { if t.profileSubmenu == nil { return } + t.profileLoadMu.Lock() + defer t.profileLoadMu.Unlock() ctx, cancel := context.WithCancel(context.Background()) defer cancel() From e4eedbe18f821a422a3de3fc5b163a708342fbd7 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Thu, 14 May 2026 14:52:14 +0200 Subject: [PATCH 3/6] [client/ui] Mirror tray profile switch to user-side ProfileManager MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Fyne UI used to write the active profile to both fronts on every switch (profile.go:264-273): the daemon SwitchProfile RPC for /var/lib/netbird/active_profile.json, then profileManager.SwitchProfile for the user-side ~/Library/Application Support/netbird/active_profile. The Wails ProfileSwitcher only kept the first. Without the user-side mirror, a UI tray switch updates the daemon's state but the CLI ProfileManager.GetActiveProfile() still returns the stale "default". The next "netbird up" then sends ProfileName="default" in the Login/Up request, and the daemon silently switches back to default, reverting whatever the user just picked in the tray. Mirror the daemon switch with profilemanager.NewProfileManager(). SwitchProfile after the daemon RPC succeeds. The daemon stays the authority — a user-side write failure is logged as a warning, not a hard error. 
--- client/ui/services/profileswitcher.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/client/ui/services/profileswitcher.go b/client/ui/services/profileswitcher.go index 1b3d2ba43..5fd8ae1ca 100644 --- a/client/ui/services/profileswitcher.go +++ b/client/ui/services/profileswitcher.go @@ -8,6 +8,8 @@ import ( "strings" log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/client/internal/profilemanager" ) // ProfileSwitcher encapsulates the full profile-switching reconnect policy so @@ -62,6 +64,21 @@ func (s *ProfileSwitcher) SwitchActive(ctx context.Context, p ProfileRef) error return fmt.Errorf("switch profile %q: %w", p.ProfileName, err) } + // Mirror the daemon-side switch into the user-side ProfileManager state + // (~/Library/Application Support/netbird/active_profile on macOS, the + // equivalent user config dir elsewhere). The CLI's `netbird up` reads + // from this file (cmd/up.go: pm.GetActiveProfile()) and then sends the + // resolved name back in the Login/Up RPC — if it diverges from the + // daemon-side /var/lib/netbird/active_profile.json, the daemon will + // silently switch its active profile to whatever the CLI sends, so the + // next CLI `up` after a UI switch reverts the profile. Failures here + // don't abort the switch (the daemon is the authority; the local + // mirror is a cache the CLI consults), but they leave the CLI's view + // stale until the next successful switch — surface as a warning. 
+ if err := profilemanager.NewProfileManager().SwitchProfile(p.ProfileName); err != nil { + log.Warnf("profileswitcher: mirror to user-side ProfileManager failed: %v", err) + } + if needsDown { if err := s.connection.Down(ctx); err != nil { log.Errorf("profileswitcher: Down: %v", err) From c0e7c61c4b204d7556ab257d2fa7279b65e3f4c6 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Thu, 14 May 2026 15:44:15 +0200 Subject: [PATCH 4/6] [client] Close giveUpChan in connectWithRetryRuns defer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The trailing close(giveUpChan) at the bottom of the function only ran on the backoff.Retry path. The DisableAutoConnect path returned early via the if-block, skipping the close entirely. That branch is hit whenever the active profile has auto-connect disabled — so every Down for those profiles waited the full 5s timeout in the Down RPC select (and twice when two Downs queued up, since both snapped the same never-closing chan). Move close(giveUpChan) into the existing defer so it fires on every exit path: DisableAutoConnect return, backoff.Retry return, or panic. The close happens after clientRunning=false is committed under the mutex, so a Down/Up that wakes on the chan-close doesn't observe a half-state where the chan is closed but clientRunning is still true. Updates the Down RPC comment to point at the deferred close as the signal source, and reframes the 5s timeout warning as "the goroutine is wedged in a slow teardown step" rather than the expected case. --- client/server/server.go | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/client/server/server.go b/client/server/server.go index 0b42b271d..eddc89b70 100644 --- a/client/server/server.go +++ b/client/server/server.go @@ -229,10 +229,20 @@ func (s *Server) Start() error { // mechanism to keep the client connected even when the connection is lost. 
// we cancel retry if the client receive a stop or down command, or if disable auto connect is configured. func (s *Server) connectWithRetryRuns(ctx context.Context, profileConfig *profilemanager.Config, statusRecorder *peer.Status, runningChan chan struct{}, giveUpChan chan struct{}) { + // close(giveUpChan) MUST run on every exit path (DisableAutoConnect + // return, backoff.Retry return, panic) — Down() blocks for up to 5s + // waiting on this signal before flipping the state to Idle, and a + // missed close leaves Down() always hitting the timeout. The signal + // fires AFTER clientRunning=false is committed under the mutex so a + // Down/Up racing with the goroutine exit never observes a half-state + // (chan closed but clientRunning still true). defer func() { s.mutex.Lock() s.clientRunning = false s.mutex.Unlock() + if giveUpChan != nil { + close(giveUpChan) + } }() if s.config.DisableAutoConnect { @@ -287,10 +297,6 @@ func (s *Server) connectWithRetryRuns(ctx context.Context, profileConfig *profil if err := backoff.Retry(runOperation, backOff); err != nil { log.Errorf("operation failed: %v", err) } - - if giveUpChan != nil { - close(giveUpChan) - } } // loginAttempt attempts to login using the provided information. it returns a status in case something fails @@ -903,11 +909,13 @@ func (s *Server) Down(ctx context.Context, _ *proto.DownRequest) (*proto.DownRes // Wait for the connectWithRetryRuns goroutine to finish with a short timeout. // This prevents the goroutine from setting ErrResetConnection after Down() returns. - // The giveUpChan is closed at the end of connectWithRetryRuns. + // The giveUpChan is closed by the goroutine's deferred cleanup (see + // connectWithRetryRuns) on every exit path. A timeout here typically + // means the goroutine is still wedged inside a slow teardown step. 
if giveUpChan != nil { select { case <-giveUpChan: - log.Debugf("client goroutine finished successfully") + log.Debugf("client goroutine finished, giveUpChan closed") case <-time.After(5 * time.Second): log.Warnf("timeout waiting for client goroutine to finish, proceeding anyway") } From 0fe8764707e9ee38bc81454d8e58c62c0bacd510 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Thu, 14 May 2026 15:44:30 +0200 Subject: [PATCH 5/6] [client/ui] Optimistic Connecting on profile switch, status row disabled MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Three UX fixes for the tray's profile-switch flow: * Optimistic Connecting paint when switching from Connected/Connecting. Previously the daemon's Down step emitted Idle before the new profile's Up emitted Connecting, leaving the tray flashing "Disconnected" for the duration of the Down. switchProfile now sets a flag and synthesizes a Connecting paint at click time; applyStatus suppresses the transient Idle and the stale Connected updates that arrive during the old profile's teardown, clearing the flag only when the new profile's flow surfaces (Connecting, NeedsLogin, LoginFailed, SessionExpired, DaemonUnavailable, or a 30s safety timeout). * Disconnect during an in-flight switch now actually disconnects. The switchCancel context cancels the ProfileSwitcher.SwitchActive goroutine so its queued Up RPC never fires, and the switchInProgress flag is cleared so the daemon's Idle push paints through immediately. Without this, the user's Disconnect click was followed seconds later by the switcher's Up bringing the new profile back online. * The first menu row is informational only. SetEnabled(false) is re-applied to t.statusItem (initial build, applyStatus, and the optimistic paint) and the openRoute("/login") OnClick handler is dropped — every actionable transition flows through the Connect/Disconnect entries below. 
The switchProfile and applyStatus godocs carry the full prevStatus → suppressed-events / final-state transition tables so future readers don't have to rebuild the policy from the code. --- client/ui/tray.go | 153 +++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 143 insertions(+), 10 deletions(-) diff --git a/client/ui/tray.go b/client/ui/tray.go index 1f6ab0d53..3a182db5d 100644 --- a/client/ui/tray.go +++ b/client/ui/tray.go @@ -134,6 +134,15 @@ type Tray struct { activeUsername string switchCancel context.CancelFunc + // switchInProgress / switchInProgressUntil drive the optimistic + // Connecting feedback when the user clicks a profile while we are + // Connected or Connecting. See switchProfile and applyStatus — + // applyStatus suppresses the transient daemon Idle that the Down + // step emits, keeping the tray on Connecting until the new + // profile's Up actually starts. + switchInProgress bool + switchInProgressUntil time.Time + // profileLoadMu serializes loadProfiles so the daemon-status-driven // refresh in applyStatus cannot race with the ApplicationStarted seed // or the post-switchProfile reload — both manipulate profileSubmenu and @@ -196,14 +205,13 @@ func (t *Tray) ShowWindow() { func (t *Tray) buildMenu() *application.Menu { menu := application.NewMenu() - // statusItem shows the daemon's current status. Kept enabled so the - // label renders in the normal foreground colour (NSMenuItem greys - // disabled items, which makes the live status read like inactive - // chrome), but no OnClick handler is wired — clicking is a no-op. - // The Connect entry below drives every actionable transition, + // statusItem shows the daemon's current status. Disabled (and no + // OnClick handler) so clicks are no-ops — the row is informational + // only. The Connect entry below drives every actionable transition, // including the SSO re-auth flow for NeedsLogin/SessionExpired // (the daemon's Up RPC returns NeedsSSOLogin when applicable). 
t.statusItem = menu.Add(menuStatusDisconnected). + SetEnabled(false). SetBitmap(iconMenuDotIdle) menu.AddSeparator() @@ -309,8 +317,20 @@ func (t *Tray) handleConnect() { }() } +// handleDisconnect aborts any in-flight profile switch before sending +// Down — otherwise the switcher's queued Up would re-establish the +// connection right after the Disconnect, making the click look like a +// no-op. Also clears the optimistic-Connecting guard so subsequent +// daemon Idle pushes paint through to the tray immediately. func (t *Tray) handleDisconnect() { t.downItem.SetEnabled(false) + t.mu.Lock() + if t.switchCancel != nil { + t.switchCancel() + t.switchCancel = nil + } + t.switchInProgress = false + t.mu.Unlock() go func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -472,8 +492,50 @@ func (t *Tray) onUpdateProgress(ev *application.CustomEvent) { // (connected, hasUpdate, status label) changed — the daemon emits // rapid SubscribeStatus bursts during health probes that would // otherwise spam Shell_NotifyIcon and the log. +// +// Optimistic profile-switch suppression: when switchProfile fired the +// optimistic Connecting paint (see its table), switchInProgress is set +// and the daemon-emitted Connected (stale, from old profile teardown +// where the daemon keeps pushing as peer count drops) and Idle (from +// the Down step) events are swallowed here so the user doesn't see a +// Disconnected/Connected blink before the new profile's flow begins. +// Clears the flag and lets reality take over on: +// - Connecting (the new profile's Up has officially started), +// - NeedsLogin / LoginFailed / SessionExpired / DaemonUnavailable +// (the daemon won't start Up — that's the new profile's terminal +// state, surface it immediately), or +// - the 30s safety timeout (Up wedged or never emitted anything). 
func (t *Tray) applyStatus(st services.Status) { t.mu.Lock() + if t.switchInProgress { + switch { + case time.Now().After(t.switchInProgressUntil): + // Safety net: the daemon never emitted a follow-up status + // after Down (Up failed silently, RPC wedged, etc.). Drop + // the optimistic guard and render whatever arrives next. + t.switchInProgress = false + case strings.EqualFold(st.Status, services.StatusConnecting), + strings.EqualFold(st.Status, services.StatusNeedsLogin), + strings.EqualFold(st.Status, services.StatusLoginFailed), + strings.EqualFold(st.Status, services.StatusSessionExpired), + strings.EqualFold(st.Status, services.StatusDaemonUnavailable): + // New profile's flow has officially started: Up has begun + // (Connecting) or the daemon refused to start Up at all + // (NeedsLogin/LoginFailed/SessionExpired/DaemonUnavailable). + // Clear the guard and let applyStatus paint normally — the + // user should see the new state without further delay. + t.switchInProgress = false + default: + // Connected (stale leftover from the old profile's teardown + // — the daemon emits extra status updates while peer count + // drops during cleanup) or Idle (transient between Down and + // Up). Suppress so the tray stays on the optimistic + // Connecting until the new profile's flow actually emits a + // post-switch status. + t.mu.Unlock() + return + } + } connected := strings.EqualFold(st.Status, services.StatusConnected) iconChanged := connected != t.connected || st.Status != t.lastStatus // Detect the transition into SessionExpired: the daemon emits the @@ -500,10 +562,9 @@ func (t *Tray) applyStatus(st services.Status) { daemonUnavailable := strings.EqualFold(st.Status, services.StatusDaemonUnavailable) connecting := strings.EqualFold(st.Status, services.StatusConnecting) if t.statusItem != nil { - // Label-only: enabled so the text isn't greyed out, but no - // click handler is bound (see buildMenu). 
Swap the displayed - // text so the user sees a familiar phrase instead of the raw - // daemon enum. + // Label-only: kept disabled (informational row). Swap the + // displayed text so the user sees a familiar phrase instead + // of the raw daemon enum. label := st.Status switch { case daemonUnavailable: @@ -512,7 +573,7 @@ func (t *Tray) applyStatus(st services.Status) { label = menuStatusDisconnected } t.statusItem.SetLabel(label) - t.statusItem.SetEnabled(true) + t.statusItem.SetEnabled(false) t.applyStatusIndicator(st.Status) } if t.upItem != nil { @@ -826,6 +887,39 @@ func (t *Tray) loadProfiles() { // switchProfile cancels any in-flight profile switch, then starts a new one. // Cancelling the previous context aborts its in-flight gRPC calls (Down/Up) // so rapid clicks always converge to the last selected profile. +// +// Optimistic-feedback transition table (driven by t.lastStatus at click time; +// see applyStatus for the suppression rules): +// +// ┌──────────────────────┬──────────────────────┬──────────────────────────┬────────────────────┐ +// │ prevStatus │ ProfileSwitcher call │ Optimistic tray label │ Suppressed events │ +// │ │ │ shown immediately │ until new flow │ +// ├──────────────────────┼──────────────────────┼──────────────────────────┼────────────────────┤ +// │ Connected │ Switch + Down + Up │ Connecting (spinner) │ Connected, Idle │ +// │ Connecting │ Switch + Down + Up │ Connecting (unchanged) │ Connected, Idle │ +// │ Idle │ Switch only │ (no change) │ — │ +// │ NeedsLogin │ Switch + Down │ (no change) │ — │ +// │ LoginFailed │ Switch + Down │ (no change) │ — │ +// │ SessionExpired │ Switch + Down │ (no change) │ — │ +// │ DaemonUnavailable │ (RPC fails) │ (no change; toast error) │ — │ +// └──────────────────────┴──────────────────────┴──────────────────────────┴────────────────────┘ +// +// Only Connected/Connecting need optimistic feedback because they're the +// only prevStatus values where the switcher follows up with Up — i.e. 
the +// only ones where the daemon emits stale Connected updates during Down +// (peer count drops as the engine tears down) and then Idle, before the +// new profile's Connecting/Connected. Both the stale Connected and the +// transient Idle are suppressed by applyStatus until a status that +// indicates the new flow has begun (Connecting, or any of the "Up won't +// run" states: NeedsLogin / LoginFailed / SessionExpired / +// DaemonUnavailable). The other branches either don't drive Down/Up at +// all (Idle) or stop after Down (NeedsLogin / LoginFailed / +// SessionExpired), and the resulting Idle is the correct terminal +// state, so no suppression is needed. +// +// A 30s safety timeout clears the in-progress flag if the daemon never +// emits a follow-up event — without it, a stuck Down/Up could leave the +// tray frozen on Connecting forever. func (t *Tray) switchProfile(name string) { t.mu.Lock() if t.switchCancel != nil { @@ -833,8 +927,18 @@ func (t *Tray) switchProfile(name string) { } ctx, cancel := context.WithCancel(context.Background()) t.switchCancel = cancel + needsOptimistic := strings.EqualFold(t.lastStatus, services.StatusConnected) || + strings.EqualFold(t.lastStatus, services.StatusConnecting) + if needsOptimistic { + t.switchInProgress = true + t.switchInProgressUntil = time.Now().Add(30 * time.Second) + } t.mu.Unlock() + if needsOptimistic { + t.applyOptimisticConnecting() + } + go func() { username, err := t.svc.Profiles.Username() if err != nil { @@ -856,6 +960,35 @@ func (t *Tray) switchProfile(name string) { }() } +// applyOptimisticConnecting paints the tray as if the daemon had just +// pushed status=Connecting, without waiting for the daemon to actually +// get there. Touches the status cache (so the iconChanged comparison in +// the next real applyStatus call lines up) and the same item bits that +// applyStatus would set for Connecting. 
Doesn't touch exitNodes, +// daemonVersion, or sessionExpired — none of those should change as a +// side effect of a profile switch. +func (t *Tray) applyOptimisticConnecting() { + t.mu.Lock() + t.connected = false + t.lastStatus = services.StatusConnecting + t.mu.Unlock() + + t.applyIcon() + if t.statusItem != nil { + t.statusItem.SetLabel(services.StatusConnecting) + t.statusItem.SetEnabled(false) + t.applyStatusIndicator(services.StatusConnecting) + } + if t.upItem != nil { + t.upItem.SetHidden(true) + t.upItem.SetEnabled(false) + } + if t.downItem != nil { + t.downItem.SetHidden(false) + t.downItem.SetEnabled(true) + } +} + // notify wraps the Wails notification service with the tray's standard // id-prefix scheme and swallows errors (notifications are best-effort). func (t *Tray) notify(title, body, id string) { From 505fcc7f7a9976504dd755743d6ab883382eea53 Mon Sep 17 00:00:00 2001 From: Zoltan Papp Date: Fri, 15 May 2026 10:01:26 +0200 Subject: [PATCH 6/6] [client/ui] Move profile-switch suppression from tray to Peers service MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The optimistic Connecting paint and the Idle/stale-Connected suppression lived in the tray's applyStatus, so only the tray got the smoothed-out transition during a profile switch — the React Status page (useStatus hook in frontend) subscribes to the same netbird:status event and was seeing the raw daemon stream, complete with the Disconnected blink. Move the policy one layer up into the Peers service, between SubscribeStatus and the Wails event bus, so every consumer downstream sees the same filtered stream: * Peers gains BeginProfileSwitch / CancelProfileSwitch / shouldSuppress. BeginProfileSwitch sets the in-progress flag and emits a synthetic Connecting status so both the tray and React paint Connecting immediately. 
shouldSuppress swallows the daemon's stale Connected (peer-count teardown) and transient Idle (Down between flows) until Connecting / NeedsLogin / LoginFailed / SessionExpired / DaemonUnavailable indicates the new profile's flow has started, or a 30s safety timeout fires. * ProfileSwitcher.SwitchActive calls peers.BeginProfileSwitch when wasActive (prevStatus was Connected or Connecting) — the only cases where the daemon emits the blink-inducing sequence. Other prevStatuses already terminate cleanly on Idle. * Tray loses its switchInProgress fields, applyOptimisticConnecting helper, applyStatus suppression switch, and switchProfile's optimistic-paint call. handleDisconnect now calls Peers.CancelProfileSwitch alongside cancelling switchCancel, so the abort path bypasses the suppression filter and the daemon's Idle paints through immediately. The full prevStatus -> action / optimistic label / suppressed events matrix now lives in the ProfileSwitcher struct godoc, with the suppression-rule-per-incoming-status table on the Peers struct godoc — together they describe the click-time policy and the stream-filter behaviour without duplication. Wails bindings need regenerating to pick up Peers.BeginProfileSwitch and Peers.CancelProfileSwitch. --- client/ui/services/peers.go | 82 +++++++++++++++ client/ui/services/profileswitcher.go | 59 ++++++++--- client/ui/tray.go | 139 +++----------------------- 3 files changed, 143 insertions(+), 137 deletions(-) diff --git a/client/ui/services/peers.go b/client/ui/services/peers.go index 0adeeba25..bba67502b 100644 --- a/client/ui/services/peers.go +++ b/client/ui/services/peers.go @@ -130,6 +130,26 @@ type Status struct { // Peers serves the dashboard data: one polled Status RPC and a long-running // SubscribeEvents stream that re-emits every event over the Wails event bus. 
+// +// Profile-switch suppression: ProfileSwitcher calls BeginProfileSwitch +// before tearing down the old profile when it would otherwise be followed +// by an Up on the new profile (i.e. previous status was Connected or +// Connecting). statusStreamLoop then swallows the transient stale +// Connected and Idle pushes the daemon emits during Down so the tray +// and the React Status page both see Connecting → new-profile-state +// instead of Connected → Connected → Idle → Connecting → new-state. +// +// Suppression transition (applied by shouldSuppress before each emit): +// +// ┌────────────────────────────────────────────┬──────────────────────────────────┐ +// │ Incoming daemon status │ Action │ +// ├────────────────────────────────────────────┼──────────────────────────────────┤ +// │ Connected, Idle │ Suppress (the blink we hide) │ +// │ Connecting │ Emit, clear flag (new Up began) │ +// │ NeedsLogin, LoginFailed, SessionExpired, │ Emit, clear flag (new profile's │ +// │ DaemonUnavailable │ "Up won't run" terminal state) │ +// │ (timeout elapsed) │ Clear flag, emit normally │ +// └────────────────────────────────────────────┴──────────────────────────────────┘ type Peers struct { conn DaemonConn emitter Emitter @@ -137,12 +157,70 @@ type Peers struct { mu sync.Mutex cancel context.CancelFunc streamWg sync.WaitGroup + + switchMu sync.Mutex + switchInProgress bool + switchInProgressUntil time.Time } func NewPeers(conn DaemonConn, emitter Emitter) *Peers { return &Peers{conn: conn, emitter: emitter} } +// BeginProfileSwitch is called by ProfileSwitcher at the start of a switch +// when the previous status was Connected/Connecting — i.e. the daemon is +// about to emit Connected updates during Down's peer-count teardown and +// then an Idle before the new profile's Up resumes the stream. The flag +// makes statusStreamLoop drop those transient events. 
A synthetic +// Connecting snapshot is emitted right away so both consumers (tray and +// React) paint the optimistic state immediately. A 30s safety timeout +// clears the flag if the daemon never emits a follow-up status. +func (s *Peers) BeginProfileSwitch() { + s.switchMu.Lock() + s.switchInProgress = true + s.switchInProgressUntil = time.Now().Add(30 * time.Second) + s.switchMu.Unlock() + s.emitter.Emit(EventStatus, Status{Status: StatusConnecting}) +} + +// CancelProfileSwitch is called by callers that abort the switch midway +// (the tray's Disconnect click while Connecting). Clears the suppression +// flag so the next daemon Idle paints through immediately instead of +// being swallowed. +func (s *Peers) CancelProfileSwitch() { + s.switchMu.Lock() + s.switchInProgress = false + s.switchMu.Unlock() +} + +func (s *Peers) shouldSuppress(st Status) bool { + s.switchMu.Lock() + defer s.switchMu.Unlock() + if !s.switchInProgress { + return false + } + if time.Now().After(s.switchInProgressUntil) { + s.switchInProgress = false + return false + } + switch { + case strings.EqualFold(st.Status, StatusConnecting), + strings.EqualFold(st.Status, StatusNeedsLogin), + strings.EqualFold(st.Status, StatusLoginFailed), + strings.EqualFold(st.Status, StatusSessionExpired), + strings.EqualFold(st.Status, StatusDaemonUnavailable): + // New profile's flow has officially begun (Up started, or daemon + // refused to start it). Clear the guard and let it through. + s.switchInProgress = false + return false + default: + // Connected (stale carryover from old profile's teardown) or Idle + // (transient between Down and Up). Suppress so the optimistic + // Connecting from BeginProfileSwitch stays painted. + return true + } +} + // Watch starts the background loops that feed the frontend: // - statusStreamLoop: push-driven snapshots on connection-state change // (Connected/Disconnected/Connecting, peer list, address). 
Drives the @@ -272,6 +350,10 @@ func (s *Peers) statusStreamLoop(ctx context.Context) { unavailable = false st := statusFromProto(resp) log.Infof("backend event: status status=%q peers=%d", st.Status, len(st.Peers)) + if s.shouldSuppress(st) { + log.Debugf("suppressing status=%q during profile switch", st.Status) + continue + } s.emitter.Emit(EventStatus, st) } } diff --git a/client/ui/services/profileswitcher.go b/client/ui/services/profileswitcher.go index 5fd8ae1ca..e2cb492df 100644 --- a/client/ui/services/profileswitcher.go +++ b/client/ui/services/profileswitcher.go @@ -12,21 +12,44 @@ import ( "github.com/netbirdio/netbird/client/internal/profilemanager" ) -// ProfileSwitcher encapsulates the full profile-switching reconnect policy so -// both the tray and the React frontend use identical logic. +// ProfileSwitcher encapsulates the full profile-switching reconnect policy +// so both the tray and the React frontend use identical logic. // -// Reconnect policy: +// Reconnect policy + optimistic-feedback table (driven by prevStatus +// captured from Peers.Get at SwitchActive entry): // -// ┌─────────────────┬──────────────────────┬────────────────────────────────────┐ -// │ Previous status │ Action │ Rationale │ -// ├─────────────────┼──────────────────────┼────────────────────────────────────┤ -// │ Connected │ Switch + Down + Up │ Reconnect with the new profile. │ -// │ Connecting │ Switch + Down + Up │ Stop old retry loop, restart. │ -// │ NeedsLogin │ Switch + Down │ Clear stale error; user logs in. │ -// │ LoginFailed │ Switch + Down │ Clear stale error; user logs in. │ -// │ SessionExpired │ Switch + Down │ Clear stale error; user logs in. │ -// │ Idle │ Switch only │ User chose offline; don't connect. 
│ -// └─────────────────┴──────────────────────┴────────────────────────────────────┘ +// ┌─────────────────┬──────────────────────┬──────────────────────────┬────────────────────┐ +// │ Previous status │ Action │ Optimistic UI label │ Suppressed events │ +// │ │ │ shown immediately │ until new flow │ +// ├─────────────────┼──────────────────────┼──────────────────────────┼────────────────────┤ +// │ Connected │ Switch + Down + Up │ Connecting (synthetic) │ Connected, Idle │ +// │ Connecting │ Switch + Down + Up │ Connecting (unchanged) │ Connected, Idle │ +// │ NeedsLogin │ Switch + Down │ (no change) │ — │ +// │ LoginFailed │ Switch + Down │ (no change) │ — │ +// │ SessionExpired │ Switch + Down │ (no change) │ — │ +// │ Idle │ Switch only │ (no change) │ — │ +// └─────────────────┴──────────────────────┴──────────────────────────┴────────────────────┘ +// +// Only Connected/Connecting trigger the optimistic Connecting paint +// (via Peers.BeginProfileSwitch): they're the only prevStatuses where +// the daemon emits stale Connected updates (peer count drops as the +// engine tears down) and then Idle, before the new profile's Up +// resumes the stream. Both are swallowed by Peers.shouldSuppress +// until a status that signals the new flow has begun (Connecting, or +// any of the "Up won't run" terminal states: NeedsLogin / LoginFailed / +// SessionExpired / DaemonUnavailable). The other prevStatuses either +// don't drive Down/Up at all (Idle) or stop after Down (NeedsLogin / +// LoginFailed / SessionExpired) — the resulting Idle is the correct +// terminal state, so no suppression is needed. +// +// Rationale for each Action choice: +// +// Connected → Reconnect with the new profile. +// Connecting → Stop old retry loop, restart. +// NeedsLogin → Clear stale error; user logs in. +// LoginFailed → Clear stale error; user logs in. +// SessionExpired → Clear stale error; user logs in. +// Idle → User chose offline; don't connect. 
type ProfileSwitcher struct { profiles *Profiles connection *Connection @@ -60,6 +83,16 @@ func (s *ProfileSwitcher) SwitchActive(ctx context.Context, p ProfileRef) error log.Infof("profileswitcher: switch profile=%q prevStatus=%q wasActive=%v needsDown=%v", p.ProfileName, prevStatus, wasActive, needsDown) + // Optimistic Connecting feedback for tray + React Status page: only + // when wasActive — those are the prevStatuses where the daemon will + // emit stale Connected + transient Idle pushes during Down before + // the new profile's Up resumes the stream (see Peers godoc for the + // suppression table). Other prevStatuses already terminate cleanly + // on Idle, no suppression needed. + if wasActive { + s.peers.BeginProfileSwitch() + } + if err := s.profiles.Switch(ctx, p); err != nil { return fmt.Errorf("switch profile %q: %w", p.ProfileName, err) } diff --git a/client/ui/tray.go b/client/ui/tray.go index 3a182db5d..cc947c919 100644 --- a/client/ui/tray.go +++ b/client/ui/tray.go @@ -134,15 +134,6 @@ type Tray struct { activeUsername string switchCancel context.CancelFunc - // switchInProgress / switchInProgressUntil drive the optimistic - // Connecting feedback when the user clicks a profile while we are - // Connected or Connecting. See switchProfile and applyStatus — - // applyStatus suppresses the transient daemon Idle that the Down - // step emits, keeping the tray on Connecting until the new - // profile's Up actually starts. 
- switchInProgress bool - switchInProgressUntil time.Time - // profileLoadMu serializes loadProfiles so the daemon-status-driven // refresh in applyStatus cannot race with the ApplicationStarted seed // or the post-switchProfile reload — both manipulate profileSubmenu and @@ -320,8 +311,9 @@ func (t *Tray) handleConnect() { // handleDisconnect aborts any in-flight profile switch before sending // Down — otherwise the switcher's queued Up would re-establish the // connection right after the Disconnect, making the click look like a -// no-op. Also clears the optimistic-Connecting guard so subsequent -// daemon Idle pushes paint through to the tray immediately. +// no-op. Also clears Peers' optimistic-Connecting guard so the daemon's +// Idle push (and any subsequent updates) paint through immediately +// instead of being swallowed by the profile-switch suppression filter. func (t *Tray) handleDisconnect() { t.downItem.SetEnabled(false) t.mu.Lock() @@ -329,8 +321,8 @@ func (t *Tray) handleDisconnect() { t.switchCancel() t.switchCancel = nil } - t.switchInProgress = false t.mu.Unlock() + t.svc.Peers.CancelProfileSwitch() go func() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -493,49 +485,12 @@ func (t *Tray) onUpdateProgress(ev *application.CustomEvent) { // rapid SubscribeStatus bursts during health probes that would // otherwise spam Shell_NotifyIcon and the log. // -// Optimistic profile-switch suppression: when switchProfile fired the -// optimistic Connecting paint (see its table), switchInProgress is set -// and the daemon-emitted Connected (stale, from old profile teardown -// where the daemon keeps pushing as peer count drops) and Idle (from -// the Down step) events are swallowed here so the user doesn't see a -// Disconnected/Connected blink before the new profile's flow begins. 
-// Clears the flag and lets reality take over on: -// - Connecting (the new profile's Up has officially started), -// - NeedsLogin / LoginFailed / SessionExpired / DaemonUnavailable -// (the daemon won't start Up — that's the new profile's terminal -// state, surface it immediately), or -// - the 30s safety timeout (Up wedged or never emitted anything). +// Profile-switch suppression lives one layer up in services/peers.go +// (Peers.BeginProfileSwitch / shouldSuppress) so the optimistic +// Connecting paint and the suppressed Idle/Connected events are shared +// with the React Status page rather than being a tray-only behaviour. func (t *Tray) applyStatus(st services.Status) { t.mu.Lock() - if t.switchInProgress { - switch { - case time.Now().After(t.switchInProgressUntil): - // Safety net: the daemon never emitted a follow-up status - // after Down (Up failed silently, RPC wedged, etc.). Drop - // the optimistic guard and render whatever arrives next. - t.switchInProgress = false - case strings.EqualFold(st.Status, services.StatusConnecting), - strings.EqualFold(st.Status, services.StatusNeedsLogin), - strings.EqualFold(st.Status, services.StatusLoginFailed), - strings.EqualFold(st.Status, services.StatusSessionExpired), - strings.EqualFold(st.Status, services.StatusDaemonUnavailable): - // New profile's flow has officially started: Up has begun - // (Connecting) or the daemon refused to start Up at all - // (NeedsLogin/LoginFailed/SessionExpired/DaemonUnavailable). - // Clear the guard and let applyStatus paint normally — the - // user should see the new state without further delay. - t.switchInProgress = false - default: - // Connected (stale leftover from the old profile's teardown - // — the daemon emits extra status updates while peer count - // drops during cleanup) or Idle (transient between Down and - // Up). Suppress so the tray stays on the optimistic - // Connecting until the new profile's flow actually emits a - // post-switch status. 
- t.mu.Unlock() - return - } - } connected := strings.EqualFold(st.Status, services.StatusConnected) iconChanged := connected != t.connected || st.Status != t.lastStatus // Detect the transition into SessionExpired: the daemon emits the @@ -888,38 +843,13 @@ func (t *Tray) loadProfiles() { // Cancelling the previous context aborts its in-flight gRPC calls (Down/Up) // so rapid clicks always converge to the last selected profile. // -// Optimistic-feedback transition table (driven by t.lastStatus at click time; -// see applyStatus for the suppression rules): -// -// ┌──────────────────────┬──────────────────────┬──────────────────────────┬────────────────────┐ -// │ prevStatus │ ProfileSwitcher call │ Optimistic tray label │ Suppressed events │ -// │ │ │ shown immediately │ until new flow │ -// ├──────────────────────┼──────────────────────┼──────────────────────────┼────────────────────┤ -// │ Connected │ Switch + Down + Up │ Connecting (spinner) │ Connected, Idle │ -// │ Connecting │ Switch + Down + Up │ Connecting (unchanged) │ Connected, Idle │ -// │ Idle │ Switch only │ (no change) │ — │ -// │ NeedsLogin │ Switch + Down │ (no change) │ — │ -// │ LoginFailed │ Switch + Down │ (no change) │ — │ -// │ SessionExpired │ Switch + Down │ (no change) │ — │ -// │ DaemonUnavailable │ (RPC fails) │ (no change; toast error) │ — │ -// └──────────────────────┴──────────────────────┴──────────────────────────┴────────────────────┘ -// -// Only Connected/Connecting need optimistic feedback because they're the -// only prevStatus values where the switcher follows up with Up — i.e. the -// only ones where the daemon emits stale Connected updates during Down -// (peer count drops as the engine tears down) and then Idle, before the -// new profile's Connecting/Connected. 
Both the stale Connected and the -// transient Idle are suppressed by applyStatus until a status that -// indicates the new flow has begun (Connecting, or any of the "Up won't -// run" states: NeedsLogin / LoginFailed / SessionExpired / -// DaemonUnavailable). The other branches either don't drive Down/Up at -// all (Idle) or stop after Down (NeedsLogin / LoginFailed / -// SessionExpired), and the resulting Idle is the correct terminal -// state, so no suppression is needed. -// -// A 30s safety timeout clears the in-progress flag if the daemon never -// emits a follow-up event — without it, a stuck Down/Up could leave the -// tray frozen on Connecting forever. +// The optimistic Connecting paint (and suppression of the transient +// Idle/stale Connected daemon events that follow Down) lives in +// services/peers.go — ProfileSwitcher calls Peers.BeginProfileSwitch +// when the previous status was Connected/Connecting, which emits a +// synthetic Connecting status to the event bus and starts filtering +// the daemon stream. That way both this tray and the React Status +// page see the same optimistic state without duplicating policy. 
func (t *Tray) switchProfile(name string) { t.mu.Lock() if t.switchCancel != nil { @@ -927,18 +857,8 @@ func (t *Tray) switchProfile(name string) { } ctx, cancel := context.WithCancel(context.Background()) t.switchCancel = cancel - needsOptimistic := strings.EqualFold(t.lastStatus, services.StatusConnected) || - strings.EqualFold(t.lastStatus, services.StatusConnecting) - if needsOptimistic { - t.switchInProgress = true - t.switchInProgressUntil = time.Now().Add(30 * time.Second) - } t.mu.Unlock() - if needsOptimistic { - t.applyOptimisticConnecting() - } - go func() { username, err := t.svc.Profiles.Username() if err != nil { @@ -960,35 +880,6 @@ func (t *Tray) switchProfile(name string) { }() } -// applyOptimisticConnecting paints the tray as if the daemon had just -// pushed status=Connecting, without waiting for the daemon to actually -// get there. Touches the status cache (so the iconChanged comparison in -// the next real applyStatus call lines up) and the same item bits that -// applyStatus would set for Connecting. Doesn't touch exitNodes, -// daemonVersion, or sessionExpired — none of those should change as a -// side effect of a profile switch. -func (t *Tray) applyOptimisticConnecting() { - t.mu.Lock() - t.connected = false - t.lastStatus = services.StatusConnecting - t.mu.Unlock() - - t.applyIcon() - if t.statusItem != nil { - t.statusItem.SetLabel(services.StatusConnecting) - t.statusItem.SetEnabled(false) - t.applyStatusIndicator(services.StatusConnecting) - } - if t.upItem != nil { - t.upItem.SetHidden(true) - t.upItem.SetEnabled(false) - } - if t.downItem != nil { - t.downItem.SetHidden(false) - t.downItem.SetEnabled(true) - } -} - // notify wraps the Wails notification service with the tray's standard // id-prefix scheme and swallows errors (notifications are best-effort). func (t *Tray) notify(title, body, id string) {