Merge branch 'ui-refactor' into ui-refactor-ui

This commit is contained in:
Eduard Gert
2026-05-15 10:16:30 +02:00
6 changed files with 329 additions and 48 deletions

View File

@@ -130,6 +130,26 @@ type Status struct {
// Peers serves the dashboard data: one polled Status RPC and a long-running
// SubscribeEvents stream that re-emits every event over the Wails event bus.
//
// Profile-switch suppression: ProfileSwitcher calls BeginProfileSwitch
// before tearing down the old profile when it would otherwise be followed
// by an Up on the new profile (i.e. previous status was Connected or
// Connecting). statusStreamLoop then swallows the transient stale
// Connected and Idle pushes the daemon emits during Down so the tray
// and the React Status page both see Connecting → new-profile-state
// instead of Connected → Connected → Idle → Connecting → new-state.
//
// Suppression transition (applied by shouldSuppress before each emit):
//
// ┌────────────────────────────────────────────┬──────────────────────────────────┐
// │ Incoming daemon status │ Action │
// ├────────────────────────────────────────────┼──────────────────────────────────┤
// │ Connected, Idle │ Suppress (the blink we hide) │
// │ Connecting │ Emit, clear flag (new Up began) │
// │ NeedsLogin, LoginFailed, SessionExpired, │ Emit, clear flag (new profile's │
// │ DaemonUnavailable │ "Up won't run" terminal state) │
// │ (timeout elapsed) │ Clear flag, emit normally │
// └────────────────────────────────────────────┴──────────────────────────────────┘
type Peers struct {
conn DaemonConn
emitter Emitter
@@ -137,12 +157,70 @@ type Peers struct {
mu sync.Mutex
cancel context.CancelFunc
streamWg sync.WaitGroup
switchMu sync.Mutex
switchInProgress bool
switchInProgressUntil time.Time
}
// NewPeers returns a Peers wired to the given daemon connection and
// event emitter. Every other field starts at its useful zero value
// (mutexes unlocked, suppression flag cleared).
func NewPeers(conn DaemonConn, emitter Emitter) *Peers {
	p := new(Peers)
	p.conn = conn
	p.emitter = emitter
	return p
}
// BeginProfileSwitch arms the transient-status suppression used while a
// profile switch is in flight. ProfileSwitcher invokes it at the start
// of a switch when the previous status was Connected/Connecting — the
// cases where the daemon is about to push Connected updates during
// Down's teardown and then a transient Idle before the new profile's Up
// resumes the stream — so that statusStreamLoop drops those events. A
// synthetic Connecting snapshot is emitted right away so both consumers
// (tray and React) paint the optimistic state immediately. A 30s safety
// deadline clears the flag if the daemon never emits a follow-up status.
func (s *Peers) BeginProfileSwitch() {
	deadline := time.Now().Add(30 * time.Second)

	s.switchMu.Lock()
	s.switchInProgress = true
	s.switchInProgressUntil = deadline
	s.switchMu.Unlock()

	// The synthetic snapshot goes out after the lock is released.
	s.emitter.Emit(EventStatus, Status{Status: StatusConnecting})
}
// CancelProfileSwitch disarms the suppression flag set by
// BeginProfileSwitch. Callers that abort the switch midway (the tray's
// Disconnect click while Connecting) use it so the daemon's next Idle
// paints through immediately instead of being swallowed.
func (s *Peers) CancelProfileSwitch() {
	s.switchMu.Lock()
	defer s.switchMu.Unlock()
	s.switchInProgress = false
}
// shouldSuppress reports whether the given status snapshot must be
// dropped instead of emitted. It returns false when no profile switch
// is in progress or when the safety deadline has passed (clearing the
// flag in the latter case). While a switch is active, a status that
// signals the new profile's flow has begun — Connecting, or one of the
// terminal "Up won't run" states (NeedsLogin, LoginFailed,
// SessionExpired, DaemonUnavailable) — clears the flag and passes
// through; everything else (the stale Connected from the old profile's
// teardown and the transient Idle between Down and Up) is suppressed so
// the optimistic Connecting from BeginProfileSwitch stays painted.
func (s *Peers) shouldSuppress(st Status) bool {
	s.switchMu.Lock()
	defer s.switchMu.Unlock()

	if !s.switchInProgress {
		return false
	}
	if time.Now().After(s.switchInProgressUntil) {
		// Safety timeout hit: the daemon never sent a follow-up
		// status. Stop suppressing and emit normally from here on.
		s.switchInProgress = false
		return false
	}

	// Statuses that mark the new profile's flow starting (Up began) or
	// its refusal to start. Any of them ends the suppression window.
	flowBegun := []string{
		StatusConnecting,
		StatusNeedsLogin,
		StatusLoginFailed,
		StatusSessionExpired,
		StatusDaemonUnavailable,
	}
	for _, want := range flowBegun {
		if strings.EqualFold(st.Status, want) {
			s.switchInProgress = false
			return false
		}
	}

	// Stale Connected carryover or transient Idle: swallow it.
	return true
}
// Watch starts the background loops that feed the frontend:
// - statusStreamLoop: push-driven snapshots on connection-state change
// (Connected/Disconnected/Connecting, peer list, address). Drives the
@@ -272,6 +350,10 @@ func (s *Peers) statusStreamLoop(ctx context.Context) {
unavailable = false
st := statusFromProto(resp)
log.Infof("backend event: status status=%q peers=%d", st.Status, len(st.Peers))
if s.shouldSuppress(st) {
log.Debugf("suppressing status=%q during profile switch", st.Status)
continue
}
s.emitter.Emit(EventStatus, st)
}
}

View File

@@ -8,23 +8,48 @@ import (
"strings"
log "github.com/sirupsen/logrus"
"github.com/netbirdio/netbird/client/internal/profilemanager"
)
// ProfileSwitcher encapsulates the full profile-switching reconnect policy so
// both the tray and the React frontend use identical logic.
// ProfileSwitcher encapsulates the full profile-switching reconnect policy
// so both the tray and the React frontend use identical logic.
//
// Reconnect policy:
// Reconnect policy + optimistic-feedback table (driven by prevStatus
// captured from Peers.Get at SwitchActive entry):
//
// ┌─────────────────┬──────────────────────┬────────────────────────────────────┐
// │ Previous status │ Action               │ Rationale                          │
// ├─────────────────┼──────────────────────┼────────────────────────────────────┤
// │ Connected       │ Switch + Down + Up   │ Reconnect with the new profile.    │
// │ Connecting      │ Switch + Down + Up   │ Stop old retry loop, restart.      │
// │ NeedsLogin      │ Switch + Down        │ Clear stale error; user logs in.   │
// │ LoginFailed     │ Switch + Down        │ Clear stale error; user logs in.   │
// │ SessionExpired  │ Switch + Down        │ Clear stale error; user logs in.   │
// │ Idle            │ Switch only          │ User chose offline; don't connect. │
// └─────────────────┴──────────────────────┴────────────────────────────────────┘
// ┌─────────────────┬──────────────────────┬──────────────────────────┬────────────────────┐
// │ Previous status │ Action               │ Optimistic UI label      │ Suppressed events  │
// │                 │                      │ shown immediately        │ until new flow     │
// ├─────────────────┼──────────────────────┼──────────────────────────┼────────────────────┤
// │ Connected       │ Switch + Down + Up   │ Connecting (synthetic)   │ Connected, Idle    │
// │ Connecting      │ Switch + Down + Up   │ Connecting (unchanged)   │ Connected, Idle    │
// │ NeedsLogin      │ Switch + Down        │ (no change)              │ —                  │
// │ LoginFailed     │ Switch + Down        │ (no change)              │ —                  │
// │ SessionExpired  │ Switch + Down        │ (no change)              │ —                  │
// │ Idle            │ Switch only          │ (no change)              │ —                  │
// └─────────────────┴──────────────────────┴──────────────────────────┴────────────────────┘
//
// Only Connected/Connecting trigger the optimistic Connecting paint
// (via Peers.BeginProfileSwitch): they're the only prevStatuses where
// the daemon emits stale Connected updates (peer count drops as the
// engine tears down) and then Idle, before the new profile's Up
// resumes the stream. Both are swallowed by Peers.shouldSuppress
// until a status that signals the new flow has begun (Connecting, or
// any of the "Up won't run" terminal states: NeedsLogin / LoginFailed /
// SessionExpired / DaemonUnavailable). The other prevStatuses either
// don't drive Down/Up at all (Idle) or stop after Down (NeedsLogin /
// LoginFailed / SessionExpired) — the resulting Idle is the correct
// terminal state, so no suppression is needed.
//
// Rationale for each Action choice:
//
// Connected → Reconnect with the new profile.
// Connecting → Stop old retry loop, restart.
// NeedsLogin → Clear stale error; user logs in.
// LoginFailed → Clear stale error; user logs in.
// SessionExpired → Clear stale error; user logs in.
// Idle → User chose offline; don't connect.
type ProfileSwitcher struct {
profiles *Profiles
connection *Connection
@@ -58,10 +83,35 @@ func (s *ProfileSwitcher) SwitchActive(ctx context.Context, p ProfileRef) error
log.Infof("profileswitcher: switch profile=%q prevStatus=%q wasActive=%v needsDown=%v",
p.ProfileName, prevStatus, wasActive, needsDown)
// Optimistic Connecting feedback for tray + React Status page: only
// when wasActive — those are the prevStatuses where the daemon will
// emit stale Connected + transient Idle pushes during Down before
// the new profile's Up resumes the stream (see Peers godoc for the
// suppression table). Other prevStatuses already terminate cleanly
// on Idle, no suppression needed.
if wasActive {
s.peers.BeginProfileSwitch()
}
if err := s.profiles.Switch(ctx, p); err != nil {
return fmt.Errorf("switch profile %q: %w", p.ProfileName, err)
}
// Mirror the daemon-side switch into the user-side ProfileManager state
// (~/Library/Application Support/netbird/active_profile on macOS, the
// equivalent user config dir elsewhere). The CLI's `netbird up` reads
// from this file (cmd/up.go: pm.GetActiveProfile()) and then sends the
// resolved name back in the Login/Up RPC — if it diverges from the
// daemon-side /var/lib/netbird/active_profile.json, the daemon will
// silently switch its active profile to whatever the CLI sends, so the
// next CLI `up` after a UI switch reverts the profile. Failures here
// don't abort the switch (the daemon is the authority; the local
// mirror is a cache the CLI consults), but they leave the CLI's view
// stale until the next successful switch — surface as a warning.
if err := profilemanager.NewProfileManager().SwitchProfile(p.ProfileName); err != nil {
log.Warnf("profileswitcher: mirror to user-side ProfileManager failed: %v", err)
}
if needsDown {
if err := s.connection.Down(ctx); err != nil {
log.Errorf("profileswitcher: Down: %v", err)