[client/ui] Fix stale LoginFailed/NeedsLogin state after profile switch

When the active profile was in LoginFailed, NeedsLogin, or SessionExpired,
switching to another profile left the daemon holding stale management/signal
errors. The new profile inherited the error state from the previous one.

Two fixes:
1. server.go Down(): reset statusRecorder management/signal errors so the
   next Up() starts with a clean status snapshot instead of the previous
   profile's error state.
2. tray.go switchProfile(): add NeedsLogin/LoginFailed/SessionExpired to
   the needsDown set. Down() is called to flush stale daemon state, but
   Up() is not — the user initiates login on the new profile manually.
This commit is contained in:
Zoltan Papp
2026-05-13 15:13:20 +02:00
parent 6c9b821bf0
commit c0cd88a3d0
2 changed files with 37 additions and 23 deletions

View File

@@ -866,6 +866,15 @@ func (s *Server) Down(ctx context.Context, _ *proto.DownRequest) (*proto.DownRes
// stuck at Connecting long after the user asked to disconnect.
internal.CtxGetState(s.rootCtx).Set(internal.StatusIdle)
// Clear stale management/signal errors so the next Up() (typically for a
// different profile) starts with a clean status snapshot. Without this,
// a managementError left over from a LoginFailed cycle persists in the
// statusRecorder and appears in the new profile's initial
// SubscribeStatus snapshot, making the new profile look like it also
// failed to log in.
s.statusRecorder.MarkManagementDisconnected(nil)
s.statusRecorder.MarkSignalDisconnected(nil)
return &proto.DownResponse{}, nil
}

View File

@@ -804,29 +804,33 @@ func (t *Tray) loadProfiles() {
//
// Reconnect policy by previous daemon status:
//
// ┌─────────────────┬──────────────────────┬───────────────────────────────────┐
// │ Previous status │ Tray action          │ Rationale                         │
// ├─────────────────┼──────────────────────┼───────────────────────────────────┤
// │ Connected       │ Switch + Down + Up   │ Reconnect with the new profile.   │
// │ Connecting      │ Switch + Down + Up   │ Stop the retry loop still dialing │
// │                 │                      │ the old management server, then   │
// │                 │                      │ restart with new config.          │
// │ Idle            │ Switch only          │ User chose to be offline; don't   │
// │                 │                      │ silently flip the daemon online.  │
// │ NeedsLogin      │ Switch only          │ Login needs interactive SSO; let  │
// │ LoginFailed     │ Switch only          │ the user trigger the next step.   │
// │ SessionExpired  │ Switch only          │                                   │
// └─────────────────┴──────────────────────┴───────────────────────────────────┘
// ┌─────────────────┬──────────────────────┬───────────────────────────────────┐
// │ Previous status │ Tray action          │ Rationale                         │
// ├─────────────────┼──────────────────────┼───────────────────────────────────┤
// │ Connected       │ Switch + Down + Up   │ Reconnect with the new profile.   │
// │ Connecting      │ Switch + Down + Up   │ Stop the retry loop still dialing │
// │                 │                      │ the old management server, then   │
// │                 │                      │ restart with new config.          │
// │ Idle            │ Switch only          │ User chose to be offline; don't   │
// │                 │                      │ silently flip the daemon online.  │
// │ NeedsLogin      │ Switch + Down        │ Clean stale error state so the    │
// │ LoginFailed     │ Switch + Down        │ new profile starts from Idle.     │
// │ SessionExpired  │ Switch + Down        │ User initiates login manually.    │
// └─────────────────┴──────────────────────┴───────────────────────────────────┘
//
// Rule of thumb: auto-reconnect only when the daemon was actively trying
// to be online (Connected or Connecting). Any other state is a deliberate
// waiting point — keep the user in control of the next action.
// Rule of thumb: auto-reconnect only when the daemon was actively trying to be
// online (Connected or Connecting). Login-error states get Down so stale errors
// are cleared, but no Up — the user initiates login on the new profile manually.
func (t *Tray) switchProfile(name string) {
t.mu.Lock()
prevStatus := t.lastStatus
t.mu.Unlock()
wasActive := strings.EqualFold(prevStatus, statusConnected) ||
strings.EqualFold(prevStatus, statusConnecting)
needsDown := wasActive ||
strings.EqualFold(prevStatus, statusNeedsLogin) ||
strings.EqualFold(prevStatus, statusLoginFailed) ||
strings.EqualFold(prevStatus, statusSessionExpired)
go func() {
ctx, cancel := context.WithCancel(context.Background())
@@ -837,8 +841,8 @@ func (t *Tray) switchProfile(name string) {
log.Errorf("get current user: %v", err)
return
}
log.Infof("tray switchProfile: sending SwitchProfile RPC profile=%q user=%q prevStatus=%q wasActive=%v",
name, username, prevStatus, wasActive)
log.Infof("tray switchProfile: sending SwitchProfile RPC profile=%q user=%q prevStatus=%q wasActive=%v needsDown=%v",
name, username, prevStatus, wasActive, needsDown)
if err := t.svc.Profiles.Switch(ctx, services.ProfileRef{
ProfileName: name,
Username: username,
@@ -849,14 +853,15 @@ func (t *Tray) switchProfile(name string) {
}
log.Infof("tray switchProfile: SwitchProfile RPC succeeded profile=%q", name)
if wasActive {
// Stop the in-flight (or established) connection that's still
// pointing at the previous profile's management server, then
// bring it back up against the new profile.
log.Infof("tray switchProfile: was active (%s), reconnecting with new profile %q", prevStatus, name)
if needsDown {
log.Infof("tray switchProfile: calling Down to clean up previous state (%s)", prevStatus)
if err := t.svc.Connection.Down(ctx); err != nil {
log.Errorf("tray switchProfile: Down failed: %v", err)
}
}
if wasActive {
// Bring the connection back up against the new profile's management server.
log.Infof("tray switchProfile: was active (%s), reconnecting with new profile %q", prevStatus, name)
if err := t.svc.Connection.Up(ctx, services.UpParams{
ProfileName: name,
Username: username,