mirror of
https://github.com/netbirdio/netbird.git
synced 2026-05-16 05:39:56 +00:00
The optimistic Connecting paint and the Idle/stale-Connected
suppression lived in the tray's applyStatus, so only the tray got the
smoothed-out transition during a profile switch — the React Status
page (useStatus hook in frontend) subscribes to the same
netbird:status event and was seeing the raw daemon stream, complete
with the Disconnected blink.
Move the policy one layer up into the Peers service, between
SubscribeStatus and the Wails event bus, so every consumer downstream
sees the same filtered stream:
* Peers gains BeginProfileSwitch / CancelProfileSwitch / shouldSuppress.
BeginProfileSwitch sets the in-progress flag and emits a synthetic
Connecting status so both the tray and React paint Connecting
immediately. shouldSuppress swallows the daemon's stale Connected
(peer-count teardown) and transient Idle (Down between flows)
until Connecting / NeedsLogin / LoginFailed / SessionExpired /
DaemonUnavailable indicates the new profile's flow has started,
or a 30s safety timeout fires.
* ProfileSwitcher.SwitchActive calls peers.BeginProfileSwitch when
wasActive (prevStatus was Connected or Connecting) — the only
cases where the daemon emits the blink-inducing sequence. Other
prevStatuses already terminate cleanly on Idle.
* Tray loses its switchInProgress fields, applyOptimisticConnecting
helper, applyStatus suppression switch, and switchProfile's
optimistic-paint call. handleDisconnect now calls
Peers.CancelProfileSwitch alongside cancelling switchCancel, so
the abort path bypasses the suppression filter and the daemon's
Idle paints through immediately.
The full prevStatus -> action / optimistic label / suppressed events
matrix now lives in the ProfileSwitcher struct godoc, with the
suppression-rule-per-incoming-status table on the Peers struct
godoc — together they describe the click-time policy and the
stream-filter behaviour without duplication.
Wails bindings need regenerating to pick up Peers.BeginProfileSwitch
and Peers.CancelProfileSwitch.
129 lines
6.2 KiB
Go
//go:build !android && !ios && !freebsd && !js
|
|
|
|
package services
|
|
|
|
import (
|
|
"context"
|
|
"fmt"
|
|
"strings"
|
|
|
|
log "github.com/sirupsen/logrus"
|
|
|
|
"github.com/netbirdio/netbird/client/internal/profilemanager"
|
|
)
|
|
|
|
// ProfileSwitcher encapsulates the full profile-switching reconnect policy
// so both the tray and the React frontend use identical logic.
//
// Reconnect policy + optimistic-feedback table (driven by prevStatus
// captured from Peers.Get at SwitchActive entry):
//
// ┌─────────────────┬──────────────────────┬──────────────────────────┬────────────────────┐
// │ Previous status │ Action               │ Optimistic UI label      │ Suppressed events  │
// │                 │                      │ shown immediately        │ until new flow     │
// ├─────────────────┼──────────────────────┼──────────────────────────┼────────────────────┤
// │ Connected       │ Switch + Down + Up   │ Connecting (synthetic)   │ Connected, Idle    │
// │ Connecting      │ Switch + Down + Up   │ Connecting (unchanged)   │ Connected, Idle    │
// │ NeedsLogin      │ Switch + Down        │ (no change)              │ —                  │
// │ LoginFailed     │ Switch + Down        │ (no change)              │ —                  │
// │ SessionExpired  │ Switch + Down        │ (no change)              │ —                  │
// │ Idle            │ Switch only          │ (no change)              │ —                  │
// └─────────────────┴──────────────────────┴──────────────────────────┴────────────────────┘
//
// Only Connected/Connecting trigger the optimistic Connecting paint
// (via Peers.BeginProfileSwitch): they're the only prevStatuses where
// the daemon emits stale Connected updates (peer count drops as the
// engine tears down) and then Idle, before the new profile's Up
// resumes the stream. Both are swallowed by Peers.shouldSuppress
// until a status that signals the new flow has begun (Connecting, or
// any of the "Up won't run" terminal states: NeedsLogin / LoginFailed /
// SessionExpired / DaemonUnavailable). The other prevStatuses either
// don't drive Down/Up at all (Idle) or stop after Down (NeedsLogin /
// LoginFailed / SessionExpired) — the resulting Idle is the correct
// terminal state, so no suppression is needed.
//
// Rationale for each Action choice:
//
//	Connected      → Reconnect with the new profile.
//	Connecting     → Stop old retry loop, restart.
//	NeedsLogin     → Clear stale error; user logs in.
//	LoginFailed    → Clear stale error; user logs in.
//	SessionExpired → Clear stale error; user logs in.
//	Idle           → User chose offline; don't connect.
type ProfileSwitcher struct {
	profiles   *Profiles   // daemon-side profile switch RPC (Switch)
	connection *Connection // Down/Up connection RPCs against the daemon
	peers      *Peers      // status source (Get) + suppression hooks (BeginProfileSwitch)
}
|
|
|
|
// NewProfileSwitcher creates a ProfileSwitcher backed by the given services.
|
|
func NewProfileSwitcher(profiles *Profiles, connection *Connection, peers *Peers) *ProfileSwitcher {
|
|
return &ProfileSwitcher{profiles: profiles, connection: connection, peers: peers}
|
|
}
|
|
|
|
// SwitchActive switches to the named profile applying the reconnect policy.
|
|
// All RPCs complete quickly: Up uses async mode so the daemon starts the
|
|
// connection attempt and returns immediately; status updates flow via the
|
|
// SubscribeStatus stream.
|
|
func (s *ProfileSwitcher) SwitchActive(ctx context.Context, p ProfileRef) error {
|
|
prevStatus := ""
|
|
if st, err := s.peers.Get(ctx); err == nil {
|
|
prevStatus = st.Status
|
|
} else {
|
|
log.Warnf("profileswitcher: get status: %v", err)
|
|
}
|
|
|
|
wasActive := strings.EqualFold(prevStatus, StatusConnected) ||
|
|
strings.EqualFold(prevStatus, StatusConnecting)
|
|
needsDown := wasActive ||
|
|
strings.EqualFold(prevStatus, StatusNeedsLogin) ||
|
|
strings.EqualFold(prevStatus, StatusLoginFailed) ||
|
|
strings.EqualFold(prevStatus, StatusSessionExpired)
|
|
|
|
log.Infof("profileswitcher: switch profile=%q prevStatus=%q wasActive=%v needsDown=%v",
|
|
p.ProfileName, prevStatus, wasActive, needsDown)
|
|
|
|
// Optimistic Connecting feedback for tray + React Status page: only
|
|
// when wasActive — those are the prevStatuses where the daemon will
|
|
// emit stale Connected + transient Idle pushes during Down before
|
|
// the new profile's Up resumes the stream (see Peers godoc for the
|
|
// suppression table). Other prevStatuses already terminate cleanly
|
|
// on Idle, no suppression needed.
|
|
if wasActive {
|
|
s.peers.BeginProfileSwitch()
|
|
}
|
|
|
|
if err := s.profiles.Switch(ctx, p); err != nil {
|
|
return fmt.Errorf("switch profile %q: %w", p.ProfileName, err)
|
|
}
|
|
|
|
// Mirror the daemon-side switch into the user-side ProfileManager state
|
|
// (~/Library/Application Support/netbird/active_profile on macOS, the
|
|
// equivalent user config dir elsewhere). The CLI's `netbird up` reads
|
|
// from this file (cmd/up.go: pm.GetActiveProfile()) and then sends the
|
|
// resolved name back in the Login/Up RPC — if it diverges from the
|
|
// daemon-side /var/lib/netbird/active_profile.json, the daemon will
|
|
// silently switch its active profile to whatever the CLI sends, so the
|
|
// next CLI `up` after a UI switch reverts the profile. Failures here
|
|
// don't abort the switch (the daemon is the authority; the local
|
|
// mirror is a cache the CLI consults), but they leave the CLI's view
|
|
// stale until the next successful switch — surface as a warning.
|
|
if err := profilemanager.NewProfileManager().SwitchProfile(p.ProfileName); err != nil {
|
|
log.Warnf("profileswitcher: mirror to user-side ProfileManager failed: %v", err)
|
|
}
|
|
|
|
if needsDown {
|
|
if err := s.connection.Down(ctx); err != nil {
|
|
log.Errorf("profileswitcher: Down: %v", err)
|
|
}
|
|
}
|
|
|
|
if wasActive {
|
|
if err := s.connection.Up(ctx, UpParams(p)); err != nil {
|
|
return fmt.Errorf("reconnect %q: %w", p.ProfileName, err)
|
|
}
|
|
}
|
|
|
|
return nil
|
|
}
|