mirror of https://github.com/netbirdio/netbird.git
Merge branch 'ui-refactor' into ui-refactor-ui

@@ -1281,6 +1281,18 @@ func (d *Status) notifyStateChange() {
	}
}

// NotifyStateChange is the public wake-the-subscribers entry point used by
// callers that mutate state outside the peer recorder — most importantly
// the connect-state machine, which writes StatusNeedsLogin into the
// shared contextState (client/internal/state.go) without touching any
// recorder field. Without this push the SubscribeStatus stream stays on
// the previous snapshot until an unrelated peer/management/signal
// change happens to fire notifyStateChange, leaving the UI's status
// out of sync with the daemon.
func (d *Status) NotifyStateChange() {
	d.notifyStateChange()
}

func (d *Status) SetWgIface(wgInterface WGIfaceStatus) {
	d.mux.Lock()
	defer d.mux.Unlock()

@@ -33,17 +33,34 @@ func CtxGetState(ctx context.Context) *contextState {
}

type contextState struct {
	err error
	status StatusType
	mutex sync.Mutex
	err error
	status StatusType
	mutex sync.Mutex
	onChange func()
}

// SetOnChange installs a callback fired after every successful Set. Used by
// the daemon to wire the status recorder's notifyStateChange so any
// state.Set in the connect/login paths pushes a fresh snapshot to
// SubscribeStatus subscribers without each callsite having to opt in.
// The callback runs outside the contextState mutex to avoid a lock-order
// dependency with the recorder's stateChangeMux.
func (c *contextState) SetOnChange(fn func()) {
	c.mutex.Lock()
	c.onChange = fn
	c.mutex.Unlock()
}

func (c *contextState) Set(update StatusType) {
	c.mutex.Lock()
	defer c.mutex.Unlock()

	c.status = update
	c.err = nil
	cb := c.onChange
	c.mutex.Unlock()

	if cb != nil {
		cb()
	}
}

func (c *contextState) Status() (StatusType, error) {

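The ordering in Set above (copy the callback under the mutex, invoke it after unlocking) is what avoids nesting the recorder's stateChangeMux inside contextState.mutex. A minimal, self-contained sketch of the same pattern; the types and fields below are illustrative stand-ins, not the actual NetBird code:

package main

import "sync"

type recorder struct {
	mu   sync.Mutex
	subs []chan struct{}
}

// notify takes the recorder's own lock; it must never run while the state
// lock below is still held, or two concurrent callers can acquire the two
// locks in opposite orders.
func (r *recorder) notify() {
	r.mu.Lock()
	defer r.mu.Unlock()
	for _, ch := range r.subs {
		select {
		case ch <- struct{}{}:
		default: // drop if the subscriber is not draining
		}
	}
}

type state struct {
	mu       sync.Mutex
	status   string
	onChange func()
}

func (s *state) Set(v string) {
	s.mu.Lock()
	s.status = v
	cb := s.onChange // copy under the lock ...
	s.mu.Unlock()    // ... release ...
	if cb != nil {
		cb() // ... then invoke, so recorder.mu is never nested inside s.mu
	}
}

func main() {
	r := &recorder{}
	st := &state{}
	st.onChange = r.notify
	st.Set("NeedsLogin") // pushes a notification without holding s.mu
}
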
@@ -140,6 +140,15 @@ func (s *Server) Start() error {
	}

	state := internal.CtxGetState(s.rootCtx)
	// Every contextState.Set in the connect/login/server paths must push a
	// SubscribeStatus snapshot, otherwise transitions that don't happen to
	// be accompanied by a Mark{Management,Signal,...} call (e.g. plain
	// StatusNeedsLogin after a PermissionDenied login, StatusLoginFailed
	// after OAuth init failure, StatusIdle in the Login defer) leave the
	// UI stuck on the previous status until the next unrelated peer event.
	// Binding the recorder here means new state.Set callsites don't have
	// to opt in individually.
	state.SetOnChange(s.statusRecorder.NotifyStateChange)

	if err := handlePanicLog(); err != nil {
		log.Warnf("failed to redirect stderr: %v", err)

@@ -220,10 +229,20 @@ func (s *Server) Start() error {
// mechanism to keep the client connected even when the connection is lost.
// we cancel the retry if the client receives a stop or down command, or if auto-connect is disabled.
func (s *Server) connectWithRetryRuns(ctx context.Context, profileConfig *profilemanager.Config, statusRecorder *peer.Status, runningChan chan struct{}, giveUpChan chan struct{}) {
	// close(giveUpChan) MUST run on every exit path (DisableAutoConnect
	// return, backoff.Retry return, panic) — Down() blocks for up to 5s
	// waiting on this signal before flipping the state to Idle, and a
	// missed close leaves Down() always hitting the timeout. The signal
	// fires AFTER clientRunning=false is committed under the mutex so a
	// Down/Up racing with the goroutine exit never observes a half-state
	// (chan closed but clientRunning still true).
	defer func() {
		s.mutex.Lock()
		s.clientRunning = false
		s.mutex.Unlock()
		if giveUpChan != nil {
			close(giveUpChan)
		}
	}()

	if s.config.DisableAutoConnect {

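For the cleanup contract described above, a compact standalone illustration of why a single deferred closure is the right vehicle: it runs on normal return, early return, and panic alike, and it sequences the flag flip under the mutex before the channel close. The names below are invented for the sketch:

package main

import (
	"fmt"
	"sync"
)

type server struct {
	mu      sync.Mutex
	running bool
}

func (s *server) run(giveUp chan struct{}, fail bool) {
	defer func() {
		s.mu.Lock()
		s.running = false // committed first ...
		s.mu.Unlock()
		close(giveUp) // ... then signal waiters; runs even if we panicked
		if r := recover(); r != nil {
			fmt.Println("recovered:", r)
		}
	}()
	s.mu.Lock()
	s.running = true
	s.mu.Unlock()
	if fail {
		panic("retry loop blew up")
	}
}

func main() {
	s := &server{}
	done := make(chan struct{})
	go s.run(done, true)
	<-done // after the close, running=false is guaranteed to be visible
	s.mu.Lock()
	fmt.Println("running:", s.running)
	s.mu.Unlock()
}
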
@@ -278,10 +297,6 @@ func (s *Server) connectWithRetryRuns(ctx context.Context, profileConfig *profil
	if err := backoff.Retry(runOperation, backOff); err != nil {
		log.Errorf("operation failed: %v", err)
	}

	if giveUpChan != nil {
		close(giveUpChan)
	}
}

// loginAttempt attempts to log in using the provided information. It returns a status in case something fails

@@ -571,8 +586,35 @@ func (s *Server) Login(callerCtx context.Context, msg *proto.LoginRequest) (*pro
	return &proto.LoginResponse{}, nil
}

// WaitSSOLogin uses the userCode to validate the TokenInfo and
// waits for the user to continue with the login on a browser
// WaitSSOLogin validates the supplied userCode against the in-flight OAuth
// device/PKCE flow and blocks until the user finishes the browser leg.
//
// State transitions on exit:
//
// ┌──────────────────────────────────────────┬──────────────────────────────────┐
// │ Outcome │ contextState │
// ├──────────────────────────────────────────┼──────────────────────────────────┤
// │ Success → loginAttempt → Connected │ StatusConnected (loginAttempt) │
// │ Success → loginAttempt → still-NeedsLogin│ StatusNeedsLogin (loginAttempt) │
// │ Success → loginAttempt error │ StatusLoginFailed (loginAttempt) │
// │ UserCode mismatch │ StatusLoginFailed │
// │ WaitToken: context.Canceled (external │ defer runs: status untouched if │
// │ abort — profile switch invokes │ already NeedsLogin/LoginFailed,│
// │ actCancel/waitCancel, app quit, │ else StatusIdle. Keeps the │
// │ another WaitSSOLogin started) │ cancel from leaking as a │
// │ │ spurious LoginFailed on the │
// │ │ next profile's Up. │
// │ WaitToken: context.DeadlineExceeded │ StatusNeedsLogin │
// │ (OAuth device-code window expired │ (retryable; the UI's "Connect" │
// │ while waiting on the browser leg) │ re-enters the Login flow) │
// │ WaitToken: any other error │ StatusLoginFailed │
// │ (access_denied, expired_token, HTTP │ (genuine auth/IO failure; │
// │ failure, token validation rejection) │ surfaced verbatim to caller) │
// └──────────────────────────────────────────┴──────────────────────────────────┘
//
// The defer at the top of the function applies the Idle fallback so callers
// that bypass the explicit Set calls (the Canceled branch above, the success
// path before loginAttempt) still land on a sensible terminal status.
func (s *Server) WaitSSOLogin(callerCtx context.Context, msg *proto.WaitSSOLoginRequest) (*proto.WaitSSOLoginResponse, error) {
	s.mutex.Lock()
	if s.actCancel != nil {

@@ -632,7 +674,21 @@ func (s *Server) WaitSSOLogin(callerCtx context.Context, msg *proto.WaitSSOLogin
		s.mutex.Lock()
		s.oauthAuthFlow.expiresAt = time.Now()
		s.mutex.Unlock()
		state.Set(internal.StatusLoginFailed)
		switch {
		case errors.Is(err, context.Canceled):
			// External abort (profile switch, app quit, another
			// WaitSSOLogin started). Not a login failure — let the
			// top-level defer fall through to StatusIdle so the next
			// flow starts from a clean state.
		case errors.Is(err, context.DeadlineExceeded):
			// OAuth device-code window expired with no user action.
			// Retryable — leave the daemon in NeedsLogin so the UI
			// keeps the Login affordance instead of reading as a
			// hard failure.
			state.Set(internal.StatusNeedsLogin)
		default:
			state.Set(internal.StatusLoginFailed)
		}
		log.Errorf("waiting for browser login failed: %v", err)
		return nil, err
	}

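The errors.Is classification above keeps working even when the OAuth layer returns a wrapped context error; a small sketch of that property (the wrapping here is hypothetical, not taken from the actual flow code):

package main

import (
	"context"
	"errors"
	"fmt"
)

func classify(err error) string {
	switch {
	case errors.Is(err, context.Canceled):
		return "idle-fallback" // external abort, not a login failure
	case errors.Is(err, context.DeadlineExceeded):
		return "needs-login" // device-code window expired, retryable
	default:
		return "login-failed"
	}
}

func main() {
	// errors.Is unwraps %w chains, so a wrapped cancellation is still
	// routed to the idle fallback rather than to LoginFailed.
	wrapped := fmt.Errorf("waiting for token: %w", context.Canceled)
	fmt.Println(classify(wrapped))                      // idle-fallback
	fmt.Println(classify(context.DeadlineExceeded))     // needs-login
	fmt.Println(classify(errors.New("access_denied"))) // login-failed
}
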
@@ -853,11 +909,13 @@ func (s *Server) Down(ctx context.Context, _ *proto.DownRequest) (*proto.DownRes

	// Wait for the connectWithRetryRuns goroutine to finish with a short timeout.
	// This prevents the goroutine from setting ErrResetConnection after Down() returns.
	// The giveUpChan is closed at the end of connectWithRetryRuns.
	// The giveUpChan is closed by the goroutine's deferred cleanup (see
	// connectWithRetryRuns) on every exit path. A timeout here typically
	// means the goroutine is still wedged inside a slow teardown step.
	if giveUpChan != nil {
		select {
		case <-giveUpChan:
			log.Debugf("client goroutine finished successfully")
			log.Debugf("client goroutine finished, giveUpChan closed")
		case <-time.After(5 * time.Second):
			log.Warnf("timeout waiting for client goroutine to finish, proceeding anyway")
		}

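On the Down side, the wait on giveUpChan is bounded so a wedged teardown cannot hang the RPC indefinitely. A minimal sketch of that select; the 5s constant matches the code above, the rest is illustrative:

package main

import (
	"fmt"
	"time"
)

// waitForClient blocks until the client goroutine signals completion by
// closing giveUp, or gives up after the timeout and proceeds anyway.
func waitForClient(giveUp <-chan struct{}, timeout time.Duration) bool {
	if giveUp == nil {
		return true // nothing was started, nothing to wait for
	}
	select {
	case <-giveUp:
		return true // goroutine finished; safe to flip state to Idle
	case <-time.After(timeout):
		return false // teardown is wedged; log and continue
	}
}

func main() {
	done := make(chan struct{})
	go func() {
		time.Sleep(100 * time.Millisecond) // simulated teardown work
		close(done)
	}()
	fmt.Println("finished in time:", waitForClient(done, 5*time.Second))
}
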
@@ -130,6 +130,26 @@ type Status struct {

// Peers serves the dashboard data: one polled Status RPC and a long-running
// SubscribeEvents stream that re-emits every event over the Wails event bus.
//
// Profile-switch suppression: ProfileSwitcher calls BeginProfileSwitch
// before tearing down the old profile when it would otherwise be followed
// by an Up on the new profile (i.e. previous status was Connected or
// Connecting). statusStreamLoop then swallows the transient stale
// Connected and Idle pushes the daemon emits during Down so the tray
// and the React Status page both see Connecting → new-profile-state
// instead of Connected → Connected → Idle → Connecting → new-state.
//
// Suppression transition (applied by shouldSuppress before each emit):
//
// ┌────────────────────────────────────────────┬──────────────────────────────────┐
// │ Incoming daemon status │ Action │
// ├────────────────────────────────────────────┼──────────────────────────────────┤
// │ Connected, Idle │ Suppress (the blink we hide) │
// │ Connecting │ Emit, clear flag (new Up began) │
// │ NeedsLogin, LoginFailed, SessionExpired, │ Emit, clear flag (new profile's │
// │ DaemonUnavailable │ "Up won't run" terminal state) │
// │ (timeout elapsed) │ Clear flag, emit normally │
// └────────────────────────────────────────────┴──────────────────────────────────┘
type Peers struct {
	conn DaemonConn
	emitter Emitter

@@ -137,12 +157,70 @@ type Peers struct {
	mu sync.Mutex
	cancel context.CancelFunc
	streamWg sync.WaitGroup

	switchMu sync.Mutex
	switchInProgress bool
	switchInProgressUntil time.Time
}

func NewPeers(conn DaemonConn, emitter Emitter) *Peers {
	return &Peers{conn: conn, emitter: emitter}
}

// BeginProfileSwitch is called by ProfileSwitcher at the start of a switch
// when the previous status was Connected/Connecting — i.e. the daemon is
// about to emit Connected updates during Down's peer-count teardown and
// then an Idle before the new profile's Up resumes the stream. The flag
// makes statusStreamLoop drop those transient events. A synthetic
// Connecting snapshot is emitted right away so both consumers (tray and
// React) paint the optimistic state immediately. A 30s safety timeout
// clears the flag if the daemon never emits a follow-up status.
func (s *Peers) BeginProfileSwitch() {
	s.switchMu.Lock()
	s.switchInProgress = true
	s.switchInProgressUntil = time.Now().Add(30 * time.Second)
	s.switchMu.Unlock()
	s.emitter.Emit(EventStatus, Status{Status: StatusConnecting})
}

// CancelProfileSwitch is called by callers that abort the switch midway
// (the tray's Disconnect click while Connecting). Clears the suppression
// flag so the next daemon Idle paints through immediately instead of
// being swallowed.
func (s *Peers) CancelProfileSwitch() {
	s.switchMu.Lock()
	s.switchInProgress = false
	s.switchMu.Unlock()
}

func (s *Peers) shouldSuppress(st Status) bool {
	s.switchMu.Lock()
	defer s.switchMu.Unlock()
	if !s.switchInProgress {
		return false
	}
	if time.Now().After(s.switchInProgressUntil) {
		s.switchInProgress = false
		return false
	}
	switch {
	case strings.EqualFold(st.Status, StatusConnecting),
		strings.EqualFold(st.Status, StatusNeedsLogin),
		strings.EqualFold(st.Status, StatusLoginFailed),
		strings.EqualFold(st.Status, StatusSessionExpired),
		strings.EqualFold(st.Status, StatusDaemonUnavailable):
		// New profile's flow has officially begun (Up started, or daemon
		// refused to start it). Clear the guard and let it through.
		s.switchInProgress = false
		return false
	default:
		// Connected (stale carryover from old profile's teardown) or Idle
		// (transient between Down and Up). Suppress so the optimistic
		// Connecting from BeginProfileSwitch stays painted.
		return true
	}
}

// Watch starts the background loops that feed the frontend:
//   - statusStreamLoop: push-driven snapshots on connection-state change
//     (Connected/Disconnected/Connecting, peer list, address). Drives the

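Fed the event sequence a switch away from a connected profile actually produces, the filter above collapses the stale tail into a single clean transition. A simplified, table-driven stand-in for shouldSuppress showing that behaviour (statuses and timing are illustrative):

package main

import (
	"fmt"
	"strings"
	"time"
)

type filter struct {
	active bool
	until  time.Time
}

// suppress mirrors the decision table: during a switch, Connected/Idle are
// swallowed; Connecting or any "Up won't run" terminal state clears the
// guard and is emitted.
func (f *filter) suppress(status string) bool {
	if !f.active || time.Now().After(f.until) {
		f.active = false
		return false
	}
	switch {
	case strings.EqualFold(status, "Connecting"),
		strings.EqualFold(status, "NeedsLogin"),
		strings.EqualFold(status, "LoginFailed"),
		strings.EqualFold(status, "SessionExpired"),
		strings.EqualFold(status, "DaemonUnavailable"):
		f.active = false
		return false
	default:
		return true
	}
}

func main() {
	f := &filter{active: true, until: time.Now().Add(30 * time.Second)}
	// Typical daemon stream while switching away from a connected profile:
	for _, st := range []string{"Connected", "Connected", "Idle", "Connecting", "Connected"} {
		if f.suppress(st) {
			fmt.Println("suppressed:", st)
			continue
		}
		fmt.Println("emitted:  ", st)
	}
	// The two stale Connected and the transient Idle are dropped;
	// Connecting clears the guard, so the final Connected paints through.
}
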
@@ -272,6 +350,10 @@ func (s *Peers) statusStreamLoop(ctx context.Context) {
		unavailable = false
		st := statusFromProto(resp)
		log.Infof("backend event: status status=%q peers=%d", st.Status, len(st.Peers))
		if s.shouldSuppress(st) {
			log.Debugf("suppressing status=%q during profile switch", st.Status)
			continue
		}
		s.emitter.Emit(EventStatus, st)
	}
}

@@ -8,23 +8,48 @@ import (
	"strings"

	log "github.com/sirupsen/logrus"

	"github.com/netbirdio/netbird/client/internal/profilemanager"
)

// ProfileSwitcher encapsulates the full profile-switching reconnect policy so
// both the tray and the React frontend use identical logic.
// ProfileSwitcher encapsulates the full profile-switching reconnect policy
// so both the tray and the React frontend use identical logic.
//
// Reconnect policy:
// Reconnect policy + optimistic-feedback table (driven by prevStatus
// captured from Peers.Get at SwitchActive entry):
//
// ┌─────────────────┬──────────────────────┬────────────────────────────────────┐
// │ Previous status │ Action │ Rationale │
// ├─────────────────┼──────────────────────┼────────────────────────────────────┤
// │ Connected │ Switch + Down + Up │ Reconnect with the new profile. │
// │ Connecting │ Switch + Down + Up │ Stop old retry loop, restart. │
// │ NeedsLogin │ Switch + Down │ Clear stale error; user logs in. │
// │ LoginFailed │ Switch + Down │ Clear stale error; user logs in. │
// │ SessionExpired │ Switch + Down │ Clear stale error; user logs in. │
// │ Idle │ Switch only │ User chose offline; don't connect. │
// └─────────────────┴──────────────────────┴────────────────────────────────────┘
// ┌─────────────────┬──────────────────────┬──────────────────────────┬────────────────────┐
// │ Previous status │ Action │ Optimistic UI label │ Suppressed events │
// │ │ │ shown immediately │ until new flow │
// ├─────────────────┼──────────────────────┼──────────────────────────┼────────────────────┤
// │ Connected │ Switch + Down + Up │ Connecting (synthetic) │ Connected, Idle │
// │ Connecting │ Switch + Down + Up │ Connecting (unchanged) │ Connected, Idle │
// │ NeedsLogin │ Switch + Down │ (no change) │ — │
// │ LoginFailed │ Switch + Down │ (no change) │ — │
// │ SessionExpired │ Switch + Down │ (no change) │ — │
// │ Idle │ Switch only │ (no change) │ — │
// └─────────────────┴──────────────────────┴──────────────────────────┴────────────────────┘
//
// Only Connected/Connecting trigger the optimistic Connecting paint
// (via Peers.BeginProfileSwitch): they're the only prevStatuses where
// the daemon emits stale Connected updates (peer count drops as the
// engine tears down) and then Idle, before the new profile's Up
// resumes the stream. Both are swallowed by Peers.shouldSuppress
// until a status that signals the new flow has begun (Connecting, or
// any of the "Up won't run" terminal states: NeedsLogin / LoginFailed /
// SessionExpired / DaemonUnavailable). The other prevStatuses either
// don't drive Down/Up at all (Idle) or stop after Down (NeedsLogin /
// LoginFailed / SessionExpired) — the resulting Idle is the correct
// terminal state, so no suppression is needed.
//
// Rationale for each Action choice:
//
//	Connected      → Reconnect with the new profile.
//	Connecting     → Stop old retry loop, restart.
//	NeedsLogin     → Clear stale error; user logs in.
//	LoginFailed    → Clear stale error; user logs in.
//	SessionExpired → Clear stale error; user logs in.
//	Idle           → User chose offline; don't connect.
type ProfileSwitcher struct {
	profiles *Profiles
	connection *Connection

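The policy table reduces to two booleans plus the optimistic-paint flag. A condensed sketch of how a caller might derive them from prevStatus; the helper and field names are invented for illustration and do not appear in the source:

package main

import "fmt"

type plan struct {
	needsDown, needsUp, optimisticPaint bool
}

// planFor condenses the reconnect-policy table: only Connected/Connecting
// restart the tunnel (and get the optimistic Connecting paint); the login-
// error states only run Down to clear stale state; Idle changes nothing.
func planFor(prevStatus string) plan {
	switch prevStatus {
	case "Connected", "Connecting":
		return plan{needsDown: true, needsUp: true, optimisticPaint: true}
	case "NeedsLogin", "LoginFailed", "SessionExpired":
		return plan{needsDown: true}
	default: // Idle, unknown: switch the profile only
		return plan{}
	}
}

func main() {
	for _, st := range []string{"Connected", "NeedsLogin", "Idle"} {
		fmt.Printf("%-15s -> %+v\n", st, planFor(st))
	}
}
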
@@ -58,10 +83,35 @@ func (s *ProfileSwitcher) SwitchActive(ctx context.Context, p ProfileRef) error
	log.Infof("profileswitcher: switch profile=%q prevStatus=%q wasActive=%v needsDown=%v",
		p.ProfileName, prevStatus, wasActive, needsDown)

	// Optimistic Connecting feedback for tray + React Status page: only
	// when wasActive — those are the prevStatuses where the daemon will
	// emit stale Connected + transient Idle pushes during Down before
	// the new profile's Up resumes the stream (see Peers godoc for the
	// suppression table). Other prevStatuses already terminate cleanly
	// on Idle, no suppression needed.
	if wasActive {
		s.peers.BeginProfileSwitch()
	}

	if err := s.profiles.Switch(ctx, p); err != nil {
		return fmt.Errorf("switch profile %q: %w", p.ProfileName, err)
	}

	// Mirror the daemon-side switch into the user-side ProfileManager state
	// (~/Library/Application Support/netbird/active_profile on macOS, the
	// equivalent user config dir elsewhere). The CLI's `netbird up` reads
	// from this file (cmd/up.go: pm.GetActiveProfile()) and then sends the
	// resolved name back in the Login/Up RPC — if it diverges from the
	// daemon-side /var/lib/netbird/active_profile.json, the daemon will
	// silently switch its active profile to whatever the CLI sends, so the
	// next CLI `up` after a UI switch reverts the profile. Failures here
	// don't abort the switch (the daemon is the authority; the local
	// mirror is a cache the CLI consults), but they leave the CLI's view
	// stale until the next successful switch — surface as a warning.
	if err := profilemanager.NewProfileManager().SwitchProfile(p.ProfileName); err != nil {
		log.Warnf("profileswitcher: mirror to user-side ProfileManager failed: %v", err)
	}

	if needsDown {
		if err := s.connection.Down(ctx); err != nil {
			log.Errorf("profileswitcher: Down: %v", err)

@@ -133,6 +133,13 @@ type Tray struct {
	activeProfile string
	activeUsername string
	switchCancel context.CancelFunc

	// profileLoadMu serializes loadProfiles so the daemon-status-driven
	// refresh in applyStatus cannot race with the ApplicationStarted seed
	// or the post-switchProfile reload — both manipulate profileSubmenu and
	// call SetMenu, and the Wails menu API is not safe for concurrent
	// callers.
	profileLoadMu sync.Mutex
}

func NewTray(app *application.App, window *application.WebviewWindow, svc TrayServices) *Tray {

@@ -189,12 +196,12 @@ func (t *Tray) ShowWindow() {
func (t *Tray) buildMenu() *application.Menu {
	menu := application.NewMenu()

	// statusItem doubles as the "Login" entry once the daemon reports
	// NeedsLogin/SessionExpired — applyStatus toggles its enabled state and
	// label. The click handler is harmless while disabled, so we wire it
	// up unconditionally rather than swapping items at runtime.
	// statusItem shows the daemon's current status. Disabled (and no
	// OnClick handler) so clicks are no-ops — the row is informational
	// only. The Connect entry below drives every actionable transition,
	// including the SSO re-auth flow for NeedsLogin/SessionExpired
	// (the daemon's Up RPC returns NeedsSSOLogin when applicable).
	t.statusItem = menu.Add(menuStatusDisconnected).
		OnClick(func(*application.Context) { t.openRoute("/login") }).
		SetEnabled(false).
		SetBitmap(iconMenuDotIdle)

@@ -275,6 +282,20 @@ func (t *Tray) openRoute(route string) {
}

func (t *Tray) handleConnect() {
	// NeedsLogin/SessionExpired/LoginFailed mean the daemon won't honor a
	// plain Up RPC ("up already in progress: current status NeedsLogin") —
	// it needs the Login → WaitSSOLogin → Up sequence instead. The
	// frontend's /login route already drives that flow, so route the
	// click there rather than re-implementing the SSO dance in the tray.
	t.mu.Lock()
	needsLogin := strings.EqualFold(t.lastStatus, services.StatusNeedsLogin) ||
		strings.EqualFold(t.lastStatus, services.StatusSessionExpired) ||
		strings.EqualFold(t.lastStatus, services.StatusLoginFailed)
	t.mu.Unlock()
	if needsLogin {
		t.openRoute("/login")
		return
	}
	t.upItem.SetEnabled(false)
	go func() {
		ctx, cancel := context.WithCancel(context.Background())

@@ -287,8 +308,21 @@ func (t *Tray) handleConnect() {
	}()
}

// handleDisconnect aborts any in-flight profile switch before sending
// Down — otherwise the switcher's queued Up would re-establish the
// connection right after the Disconnect, making the click look like a
// no-op. Also clears Peers' optimistic-Connecting guard so the daemon's
// Idle push (and any subsequent updates) paint through immediately
// instead of being swallowed by the profile-switch suppression filter.
func (t *Tray) handleDisconnect() {
	t.downItem.SetEnabled(false)
	t.mu.Lock()
	if t.switchCancel != nil {
		t.switchCancel()
		t.switchCancel = nil
	}
	t.mu.Unlock()
	t.svc.Peers.CancelProfileSwitch()
	go func() {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

@@ -450,6 +484,11 @@ func (t *Tray) onUpdateProgress(ev *application.CustomEvent) {
// (connected, hasUpdate, status label) changed — the daemon emits
// rapid SubscribeStatus bursts during health probes that would
// otherwise spam Shell_NotifyIcon and the log.
//
// Profile-switch suppression lives one layer up in services/peers.go
// (Peers.BeginProfileSwitch / shouldSuppress) so the optimistic
// Connecting paint and the suppressed Idle/Connected events are shared
// with the React Status page rather than being a tray-only behaviour.
func (t *Tray) applyStatus(st services.Status) {
	t.mu.Lock()
	connected := strings.EqualFold(st.Status, services.StatusConnected)

@@ -475,16 +514,12 @@ func (t *Tray) applyStatus(st services.Status) {

	if iconChanged {
		t.applyIcon()
		needsLogin := strings.EqualFold(st.Status, services.StatusNeedsLogin) ||
			strings.EqualFold(st.Status, services.StatusSessionExpired) ||
			strings.EqualFold(st.Status, services.StatusLoginFailed)
		daemonUnavailable := strings.EqualFold(st.Status, services.StatusDaemonUnavailable)
		connecting := strings.EqualFold(st.Status, services.StatusConnecting)
		if t.statusItem != nil {
			// When the daemon needs re-authentication the status row turns
			// into the actionable Login entry — Connect would only fail.
			// When the daemon socket is unreachable, swap the label to make
			// the cause obvious; Connect/Disconnect would just fail.
			// Label-only: kept disabled (informational row). Swap the
			// displayed text so the user sees a familiar phrase instead
			// of the raw daemon enum.
			label := st.Status
			switch {
			case daemonUnavailable:

@@ -493,16 +528,19 @@ func (t *Tray) applyStatus(st services.Status) {
				label = menuStatusDisconnected
			}
			t.statusItem.SetLabel(label)
			t.statusItem.SetEnabled(needsLogin)
			t.statusItem.SetEnabled(false)
			t.applyStatusIndicator(st.Status)
		}
		if t.upItem != nil {
			// Hide Connect whenever an Up action would be a no-op or would
			// only fail: tunnel already up, daemon mid-connect (Disconnect
			// takes over the slot so the user can abort), login required,
			// or daemon socket unreachable.
			t.upItem.SetHidden(connected || connecting || needsLogin || daemonUnavailable)
			t.upItem.SetEnabled(!connected && !connecting && !needsLogin && !daemonUnavailable)
			// Connect stays visible/clickable in NeedsLogin/SessionExpired/
			// LoginFailed too — the daemon's Up RPC kicks off the SSO flow
			// when re-auth is required, mirroring the legacy Fyne client
			// where the same button drove the initial and the re-login
			// paths. Hidden only when the action would be a no-op (tunnel
			// up, daemon mid-connect — Disconnect takes the slot) or
			// would fail with no useful side effect (daemon unreachable).
			t.upItem.SetHidden(connected || connecting || daemonUnavailable)
			t.upItem.SetEnabled(!connected && !connecting && !daemonUnavailable)
		}
		if t.downItem != nil {
			// Disconnect is the abort path while the daemon is still

@@ -526,6 +564,16 @@ func (t *Tray) applyStatus(st services.Status) {
		if t.debugItem != nil {
			t.debugItem.SetEnabled(!daemonUnavailable)
		}
		// Refresh the Profiles submenu on every status-text transition: the
		// daemon does not emit an active-profile event, so the startup race
		// (UI loads profiles before autoconnect picks the persisted profile)
		// and a CLI "profile select && up" both surface here. Fired AFTER
		// all SetHidden/SetEnabled writes on the static menu items above so
		// loadProfiles' SetMenu rebuild (which runs clearMenu+processMenu
		// over the entire NSMenu and re-assigns item.impl) cannot race those
		// writes — the Wails 3 alpha menu API is not goroutine-safe and
		// reads item.disabled/item.hidden at NSMenuItem construction time.
		go t.loadProfiles()
	}
	if exitNodesChanged {
		t.rebuildExitNodes(exitNodes)

@@ -711,12 +759,18 @@ func (t *Tray) loadConfig() {

// loadProfiles refreshes the Profiles submenu from the daemon. Each
// entry is a checkbox showing the active profile and switches on click.
// Called once on ApplicationStarted and again after a successful switch
// so the checkmark moves to the new active profile.
// Called on ApplicationStarted, after a successful switchProfile, and
// from applyStatus whenever the daemon's status text changes — the
// last case catches profile flips driven by another channel (CLI
// "netbird profile select", autoconnect picking the persisted profile
// after the UI's first ListProfiles, etc.) since the daemon does not
// emit a dedicated active-profile event.
func (t *Tray) loadProfiles() {
	if t.profileSubmenu == nil {
		return
	}
	t.profileLoadMu.Lock()
	defer t.profileLoadMu.Unlock()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

@@ -788,6 +842,14 @@ func (t *Tray) loadProfiles() {
// switchProfile cancels any in-flight profile switch, then starts a new one.
// Cancelling the previous context aborts its in-flight gRPC calls (Down/Up)
// so rapid clicks always converge to the last selected profile.
//
// The optimistic Connecting paint (and suppression of the transient
// Idle/stale Connected daemon events that follow Down) lives in
// services/peers.go — ProfileSwitcher calls Peers.BeginProfileSwitch
// when the previous status was Connected/Connecting, which emits a
// synthetic Connecting status to the event bus and starts filtering
// the daemon stream. That way both this tray and the React Status
// page see the same optimistic state without duplicating policy.
func (t *Tray) switchProfile(name string) {
	t.mu.Lock()
	if t.switchCancel != nil {