Merge branch 'ui-refactor' into ui-refactor-ui

This commit is contained in:
Eduard Gert
2026-05-15 10:16:30 +02:00
6 changed files with 329 additions and 48 deletions

View File

@@ -130,6 +130,26 @@ type Status struct {
// Peers serves the dashboard data: one polled Status RPC and a long-running
// SubscribeEvents stream that re-emits every event over the Wails event bus.
//
// Profile-switch suppression: ProfileSwitcher calls BeginProfileSwitch
// before tearing down the old profile when it would otherwise be followed
// by an Up on the new profile (i.e. previous status was Connected or
// Connecting). statusStreamLoop then swallows the transient stale
// Connected and Idle pushes the daemon emits during Down so the tray
// and the React Status page both see Connecting → new-profile-state
// instead of Connected → Connected → Idle → Connecting → new-state.
//
// Suppression transition (applied by shouldSuppress before each emit):
//
// ┌────────────────────────────────────────────┬──────────────────────────────────┐
// │ Incoming daemon status │ Action │
// ├────────────────────────────────────────────┼──────────────────────────────────┤
// │ Connected, Idle │ Suppress (the blink we hide) │
// │ Connecting │ Emit, clear flag (new Up began) │
// │ NeedsLogin, LoginFailed, SessionExpired, │ Emit, clear flag (new profile's │
// │ DaemonUnavailable │ "Up won't run" terminal state) │
// │ (timeout elapsed) │ Clear flag, emit normally │
// └────────────────────────────────────────────┴──────────────────────────────────┘
type Peers struct {
conn DaemonConn
emitter Emitter
@@ -137,12 +157,70 @@ type Peers struct {
mu sync.Mutex
cancel context.CancelFunc
streamWg sync.WaitGroup
switchMu sync.Mutex
switchInProgress bool
switchInProgressUntil time.Time
}
// NewPeers wires a Peers service to the given daemon connection and
// frontend event emitter. All other fields start at their zero values.
func NewPeers(conn DaemonConn, emitter Emitter) *Peers {
	p := new(Peers)
	p.conn = conn
	p.emitter = emitter
	return p
}
// BeginProfileSwitch arms the status-suppression guard for an imminent
// profile switch and immediately publishes a synthetic Connecting
// snapshot so both consumers of the event bus (tray and React Status
// page) paint the optimistic state without waiting on the daemon.
// ProfileSwitcher calls this when the previous status was
// Connected/Connecting — the cases where the daemon emits stale
// Connected updates during Down's teardown followed by a transient
// Idle before the new profile's Up resumes the stream. The guard
// expires on its own after 30 seconds in case the daemon never emits
// a follow-up status.
func (s *Peers) BeginProfileSwitch() {
	deadline := time.Now().Add(30 * time.Second)

	s.switchMu.Lock()
	s.switchInProgress = true
	s.switchInProgressUntil = deadline
	s.switchMu.Unlock()

	// Emit outside the lock; the emitter is not part of the guarded state.
	s.emitter.Emit(EventStatus, Status{Status: StatusConnecting})
}
// CancelProfileSwitch drops the suppression guard armed by
// BeginProfileSwitch. Callers that abort a switch midway (e.g. the
// tray's Disconnect click while Connecting) use it so the daemon's
// next Idle push paints through immediately instead of being
// swallowed by the profile-switch filter.
func (s *Peers) CancelProfileSwitch() {
	s.switchMu.Lock()
	defer s.switchMu.Unlock()
	s.switchInProgress = false
}
// shouldSuppress reports whether st is a transient status that must be
// hidden while a profile switch is in flight. The guard is cleared —
// and the status emitted — as soon as the daemon sends anything that
// signals the new profile's flow has begun (Connecting, or one of the
// "Up won't run" terminal states), or once the safety deadline set by
// BeginProfileSwitch has passed.
func (s *Peers) shouldSuppress(st Status) bool {
	s.switchMu.Lock()
	defer s.switchMu.Unlock()

	if !s.switchInProgress {
		return false
	}

	// Safety valve: the daemon never followed up within the window, so
	// stop filtering and let everything paint through again.
	if time.Now().After(s.switchInProgressUntil) {
		s.switchInProgress = false
		return false
	}

	// Statuses that mean the new profile's flow has officially begun
	// (Up started, or the daemon refused to start it). Any of these
	// disarms the guard and is emitted normally.
	resumed := []string{
		StatusConnecting,
		StatusNeedsLogin,
		StatusLoginFailed,
		StatusSessionExpired,
		StatusDaemonUnavailable,
	}
	for _, candidate := range resumed {
		if strings.EqualFold(st.Status, candidate) {
			s.switchInProgress = false
			return false
		}
	}

	// Connected (stale carryover from the old profile's teardown) or
	// Idle (transient between Down and Up): keep suppressing so the
	// optimistic Connecting snapshot stays painted.
	return true
}
// Watch starts the background loops that feed the frontend:
// - statusStreamLoop: push-driven snapshots on connection-state change
// (Connected/Disconnected/Connecting, peer list, address). Drives the
@@ -272,6 +350,10 @@ func (s *Peers) statusStreamLoop(ctx context.Context) {
unavailable = false
st := statusFromProto(resp)
log.Infof("backend event: status status=%q peers=%d", st.Status, len(st.Peers))
if s.shouldSuppress(st) {
log.Debugf("suppressing status=%q during profile switch", st.Status)
continue
}
s.emitter.Emit(EventStatus, st)
}
}

View File

@@ -8,23 +8,48 @@ import (
"strings"
log "github.com/sirupsen/logrus"
"github.com/netbirdio/netbird/client/internal/profilemanager"
)
// ProfileSwitcher encapsulates the full profile-switching reconnect policy so
// both the tray and the React frontend use identical logic.
// ProfileSwitcher encapsulates the full profile-switching reconnect policy
// so both the tray and the React frontend use identical logic.
//
// Reconnect policy:
// Reconnect policy + optimistic-feedback table (driven by prevStatus
// captured from Peers.Get at SwitchActive entry):
//
// ┌─────────────────┬──────────────────────┬────────────────────────────────────┐
// │ Previous status │ Action               │ Rationale                          │
// ├─────────────────┼──────────────────────┼────────────────────────────────────┤
// │ Connected       │ Switch + Down + Up   │ Reconnect with the new profile.    │
// │ Connecting      │ Switch + Down + Up   │ Stop old retry loop, restart.      │
// │ NeedsLogin      │ Switch + Down        │ Clear stale error; user logs in.   │
// │ LoginFailed     │ Switch + Down        │ Clear stale error; user logs in.   │
// │ SessionExpired  │ Switch + Down        │ Clear stale error; user logs in.   │
// │ Idle            │ Switch only          │ User chose offline; don't connect. │
// └─────────────────┴──────────────────────┴────────────────────────────────────┘
// ┌─────────────────┬────────────────────┬────────────────────────┬────────────────────┐
// │ Previous status │ Action             │ Optimistic UI label    │ Suppressed events  │
// │                 │                    │ shown immediately      │ until new flow     │
// ├─────────────────┼────────────────────┼────────────────────────┼────────────────────┤
// │ Connected       │ Switch + Down + Up │ Connecting (synthetic) │ Connected, Idle    │
// │ Connecting      │ Switch + Down + Up │ Connecting (unchanged) │ Connected, Idle    │
// │ NeedsLogin      │ Switch + Down      │ (no change)            │ —                  │
// │ LoginFailed     │ Switch + Down      │ (no change)            │ —                  │
// │ SessionExpired  │ Switch + Down      │ (no change)            │ —                  │
// │ Idle            │ Switch only        │ (no change)            │ —                  │
// └─────────────────┴────────────────────┴────────────────────────┴────────────────────┘
//
// Only Connected/Connecting trigger the optimistic Connecting paint
// (via Peers.BeginProfileSwitch): they're the only prevStatuses where
// the daemon emits stale Connected updates (peer count drops as the
// engine tears down) and then Idle, before the new profile's Up
// resumes the stream. Both are swallowed by Peers.shouldSuppress
// until a status that signals the new flow has begun (Connecting, or
// any of the "Up won't run" terminal states: NeedsLogin / LoginFailed /
// SessionExpired / DaemonUnavailable). The other prevStatuses either
// don't drive Down/Up at all (Idle) or stop after Down (NeedsLogin /
// LoginFailed / SessionExpired) — the resulting Idle is the correct
// terminal state, so no suppression is needed.
//
// Rationale for each Action choice:
//
// Connected → Reconnect with the new profile.
// Connecting → Stop old retry loop, restart.
// NeedsLogin → Clear stale error; user logs in.
// LoginFailed → Clear stale error; user logs in.
// SessionExpired → Clear stale error; user logs in.
// Idle → User chose offline; don't connect.
type ProfileSwitcher struct {
profiles *Profiles
connection *Connection
@@ -58,10 +83,35 @@ func (s *ProfileSwitcher) SwitchActive(ctx context.Context, p ProfileRef) error
log.Infof("profileswitcher: switch profile=%q prevStatus=%q wasActive=%v needsDown=%v",
p.ProfileName, prevStatus, wasActive, needsDown)
// Optimistic Connecting feedback for tray + React Status page: only
// when wasActive — those are the prevStatuses where the daemon will
// emit stale Connected + transient Idle pushes during Down before
// the new profile's Up resumes the stream (see Peers godoc for the
// suppression table). Other prevStatuses already terminate cleanly
// on Idle, no suppression needed.
if wasActive {
s.peers.BeginProfileSwitch()
}
if err := s.profiles.Switch(ctx, p); err != nil {
return fmt.Errorf("switch profile %q: %w", p.ProfileName, err)
}
// Mirror the daemon-side switch into the user-side ProfileManager state
// (~/Library/Application Support/netbird/active_profile on macOS, the
// equivalent user config dir elsewhere). The CLI's `netbird up` reads
// from this file (cmd/up.go: pm.GetActiveProfile()) and then sends the
// resolved name back in the Login/Up RPC — if it diverges from the
// daemon-side /var/lib/netbird/active_profile.json, the daemon will
// silently switch its active profile to whatever the CLI sends, so the
// next CLI `up` after a UI switch reverts the profile. Failures here
// don't abort the switch (the daemon is the authority; the local
// mirror is a cache the CLI consults), but they leave the CLI's view
// stale until the next successful switch — surface as a warning.
if err := profilemanager.NewProfileManager().SwitchProfile(p.ProfileName); err != nil {
log.Warnf("profileswitcher: mirror to user-side ProfileManager failed: %v", err)
}
if needsDown {
if err := s.connection.Down(ctx); err != nil {
log.Errorf("profileswitcher: Down: %v", err)

View File

@@ -133,6 +133,13 @@ type Tray struct {
activeProfile string
activeUsername string
switchCancel context.CancelFunc
// profileLoadMu serializes loadProfiles so the daemon-status-driven
// refresh in applyStatus cannot race with the ApplicationStarted seed
// or the post-switchProfile reload — both manipulate profileSubmenu and
// SetMenu, which the Wails menu API is not safe against concurrent
// callers.
profileLoadMu sync.Mutex
}
func NewTray(app *application.App, window *application.WebviewWindow, svc TrayServices) *Tray {
@@ -189,12 +196,12 @@ func (t *Tray) ShowWindow() {
func (t *Tray) buildMenu() *application.Menu {
menu := application.NewMenu()
// statusItem doubles as the "Login" entry once the daemon reports
// NeedsLogin/SessionExpired — applyStatus toggles its enabled state and
// label. The click handler is harmless while disabled, so we wire it
// up unconditionally rather than swapping items at runtime.
// statusItem shows the daemon's current status. Disabled (and no
// OnClick handler) so clicks are no-ops — the row is informational
// only. The Connect entry below drives every actionable transition,
// including the SSO re-auth flow for NeedsLogin/SessionExpired
// (the daemon's Up RPC returns NeedsSSOLogin when applicable).
t.statusItem = menu.Add(menuStatusDisconnected).
OnClick(func(*application.Context) { t.openRoute("/login") }).
SetEnabled(false).
SetBitmap(iconMenuDotIdle)
@@ -275,6 +282,20 @@ func (t *Tray) openRoute(route string) {
}
func (t *Tray) handleConnect() {
// NeedsLogin/SessionExpired/LoginFailed mean the daemon won't honor a
// plain Up RPC ("up already in progress: current status NeedsLogin") —
// it needs the Login → WaitSSOLogin → Up sequence instead. The
// frontend's /login route already drives that flow, so route the
// click there rather than re-implementing the SSO dance in the tray.
t.mu.Lock()
needsLogin := strings.EqualFold(t.lastStatus, services.StatusNeedsLogin) ||
strings.EqualFold(t.lastStatus, services.StatusSessionExpired) ||
strings.EqualFold(t.lastStatus, services.StatusLoginFailed)
t.mu.Unlock()
if needsLogin {
t.openRoute("/login")
return
}
t.upItem.SetEnabled(false)
go func() {
ctx, cancel := context.WithCancel(context.Background())
@@ -287,8 +308,21 @@ func (t *Tray) handleConnect() {
}()
}
// handleDisconnect aborts any in-flight profile switch before sending
// Down — otherwise the switcher's queued Up would re-establish the
// connection right after the Disconnect, making the click look like a
// no-op. Also clears Peers' optimistic-Connecting guard so the daemon's
// Idle push (and any subsequent updates) paint through immediately
// instead of being swallowed by the profile-switch suppression filter.
func (t *Tray) handleDisconnect() {
t.downItem.SetEnabled(false)
t.mu.Lock()
if t.switchCancel != nil {
t.switchCancel()
t.switchCancel = nil
}
t.mu.Unlock()
t.svc.Peers.CancelProfileSwitch()
go func() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -450,6 +484,11 @@ func (t *Tray) onUpdateProgress(ev *application.CustomEvent) {
// (connected, hasUpdate, status label) changed — the daemon emits
// rapid SubscribeStatus bursts during health probes that would
// otherwise spam Shell_NotifyIcon and the log.
//
// Profile-switch suppression lives one layer up in services/peers.go
// (Peers.BeginProfileSwitch / shouldSuppress) so the optimistic
// Connecting paint and the suppressed Idle/Connected events are shared
// with the React Status page rather than being a tray-only behaviour.
func (t *Tray) applyStatus(st services.Status) {
t.mu.Lock()
connected := strings.EqualFold(st.Status, services.StatusConnected)
@@ -475,16 +514,12 @@ func (t *Tray) applyStatus(st services.Status) {
if iconChanged {
t.applyIcon()
needsLogin := strings.EqualFold(st.Status, services.StatusNeedsLogin) ||
strings.EqualFold(st.Status, services.StatusSessionExpired) ||
strings.EqualFold(st.Status, services.StatusLoginFailed)
daemonUnavailable := strings.EqualFold(st.Status, services.StatusDaemonUnavailable)
connecting := strings.EqualFold(st.Status, services.StatusConnecting)
if t.statusItem != nil {
// When the daemon needs re-authentication the status row turns
// into the actionable Login entry — Connect would only fail.
// When the daemon socket is unreachable, swap the label to make
// the cause obvious; Connect/Disconnect would just fail.
// Label-only: kept disabled (informational row). Swap the
// displayed text so the user sees a familiar phrase instead
// of the raw daemon enum.
label := st.Status
switch {
case daemonUnavailable:
@@ -493,16 +528,19 @@ func (t *Tray) applyStatus(st services.Status) {
label = menuStatusDisconnected
}
t.statusItem.SetLabel(label)
t.statusItem.SetEnabled(needsLogin)
t.statusItem.SetEnabled(false)
t.applyStatusIndicator(st.Status)
}
if t.upItem != nil {
// Hide Connect whenever an Up action would be a no-op or would
// only fail: tunnel already up, daemon mid-connect (Disconnect
// takes over the slot so the user can abort), login required,
// or daemon socket unreachable.
t.upItem.SetHidden(connected || connecting || needsLogin || daemonUnavailable)
t.upItem.SetEnabled(!connected && !connecting && !needsLogin && !daemonUnavailable)
// Connect stays visible/clickable in NeedsLogin/SessionExpired/
// LoginFailed too — the daemon's Up RPC kicks off the SSO flow
// when re-auth is required, mirroring the legacy Fyne client
// where the same button drove the initial and the re-login
// paths. Hidden only when the action would be a no-op (tunnel
// up, daemon mid-connect — Disconnect takes the slot) or
// would fail with no useful side effect (daemon unreachable).
t.upItem.SetHidden(connected || connecting || daemonUnavailable)
t.upItem.SetEnabled(!connected && !connecting && !daemonUnavailable)
}
if t.downItem != nil {
// Disconnect is the abort path while the daemon is still
@@ -526,6 +564,16 @@ func (t *Tray) applyStatus(st services.Status) {
if t.debugItem != nil {
t.debugItem.SetEnabled(!daemonUnavailable)
}
// Refresh the Profiles submenu on every status-text transition: the
// daemon does not emit an active-profile event, so the startup race
// (UI loads profiles before autoconnect picks the persisted profile)
// and a CLI "profile select && up" both surface here. Fired AFTER
// all SetHidden/SetEnabled writes on the static menu items above so
// loadProfiles' SetMenu rebuild (which clearMenu+processMenu the
// entire NSMenu and re-assigns item.impl) cannot race those
// writes — the Wails 3 alpha menu API is not goroutine-safe and
// reads item.disabled/item.hidden at NSMenuItem construction time.
go t.loadProfiles()
}
if exitNodesChanged {
t.rebuildExitNodes(exitNodes)
@@ -711,12 +759,18 @@ func (t *Tray) loadConfig() {
// loadProfiles refreshes the Profiles submenu from the daemon. Each
// entry is a checkbox showing the active profile and switches on click.
// Called once on ApplicationStarted and again after a successful switch
// so the checkmark moves to the new active profile.
// Called on ApplicationStarted, after a successful switchProfile, and
// from applyStatus whenever the daemon's status text changes — the
// last case catches profile flips driven by another channel (CLI
// "netbird profile select", autoconnect picking the persisted profile
// after the UI's first ListProfiles, etc.) since the daemon does not
// emit a dedicated active-profile event.
func (t *Tray) loadProfiles() {
if t.profileSubmenu == nil {
return
}
t.profileLoadMu.Lock()
defer t.profileLoadMu.Unlock()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -788,6 +842,14 @@ func (t *Tray) loadProfiles() {
// switchProfile cancels any in-flight profile switch, then starts a new one.
// Cancelling the previous context aborts its in-flight gRPC calls (Down/Up)
// so rapid clicks always converge to the last selected profile.
//
// The optimistic Connecting paint (and suppression of the transient
// Idle/stale Connected daemon events that follow Down) lives in
// services/peers.go — ProfileSwitcher calls Peers.BeginProfileSwitch
// when the previous status was Connected/Connecting, which emits a
// synthetic Connecting status to the event bus and starts filtering
// the daemon stream. That way both this tray and the React Status
// page see the same optimistic state without duplicating policy.
func (t *Tray) switchProfile(name string) {
t.mu.Lock()
if t.switchCancel != nil {