mirror of https://github.com/netbirdio/netbird.git
Merge remote-tracking branch 'origin/main' into fix/login-cmd-root-flags
client/server/capture.go (new file, 365 lines)
@@ -0,0 +1,365 @@
package server

import (
	"context"
	"io"
	"os"
	"sync"
	"time"

	log "github.com/sirupsen/logrus"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"

	"github.com/netbirdio/netbird/client/internal"
	"github.com/netbirdio/netbird/client/proto"
	"github.com/netbirdio/netbird/util/capture"
)

const maxBundleCaptureDuration = 10 * time.Minute

// bundleCapture holds the state of an in-progress capture destined for the
// debug bundle. The lifecycle is:
//
//	StartBundleCapture → capture running, writing to temp file
//	StopBundleCapture  → capture stopped, temp file available
//	DebugBundle        → temp file included in zip, then cleaned up
type bundleCapture struct {
	mu      sync.Mutex
	sess    *capture.Session
	file    *os.File
	engine  *internal.Engine
	cancel  context.CancelFunc
	stopped bool
}

// stop halts the capture session and closes the pcap writer. Idempotent.
func (bc *bundleCapture) stop() {
	bc.mu.Lock()
	defer bc.mu.Unlock()

	if bc.stopped {
		return
	}
	bc.stopped = true

	if bc.cancel != nil {
		bc.cancel()
	}
	if bc.sess != nil {
		bc.sess.Stop()
	}
}

// path returns the temp file path, or "" if no file exists.
func (bc *bundleCapture) path() string {
	if bc.file == nil {
		return ""
	}
	return bc.file.Name()
}

// cleanup removes the temp file.
func (bc *bundleCapture) cleanup() {
	if bc.file == nil {
		return
	}
	name := bc.file.Name()
	if err := bc.file.Close(); err != nil {
		log.Debugf("close bundle capture file: %v", err)
	}
	if err := os.Remove(name); err != nil && !os.IsNotExist(err) {
		log.Debugf("remove bundle capture file: %v", err)
	}
	bc.file = nil
}
// StartCapture streams a pcap or text packet capture over gRPC.
// Gated by the --enable-capture service flag.
func (s *Server) StartCapture(req *proto.StartCaptureRequest, stream proto.DaemonService_StartCaptureServer) error {
	if !s.captureEnabled {
		return status.Error(codes.PermissionDenied,
			"packet capture is disabled; reinstall or reconfigure the service with --enable-capture")
	}

	if d := req.GetDuration(); d != nil && d.AsDuration() < 0 {
		return status.Error(codes.InvalidArgument, "duration must not be negative")
	}

	matcher, err := parseCaptureFilter(req)
	if err != nil {
		return status.Errorf(codes.InvalidArgument, "invalid filter: %v", err)
	}

	pr, pw := io.Pipe()

	opts := capture.Options{
		Matcher: matcher,
		SnapLen: req.GetSnapLen(),
		Verbose: req.GetVerbose(),
		ASCII:   req.GetAscii(),
	}
	if req.GetTextOutput() {
		opts.TextOutput = pw
	} else {
		opts.Output = pw
	}

	sess, err := capture.NewSession(opts)
	if err != nil {
		pw.Close()
		return status.Errorf(codes.Internal, "create capture session: %v", err)
	}

	engine, err := s.claimCapture(sess)
	if err != nil {
		sess.Stop()
		pw.Close()
		return err
	}

	if err := engine.SetCapture(sess); err != nil {
		s.releaseCapture(sess)
		sess.Stop()
		pw.Close()
		return status.Errorf(codes.Internal, "set capture: %v", err)
	}

	// Send an empty initial message to signal that the capture was accepted.
	// The client waits for this before printing the banner, so it must arrive
	// before any packet data.
	if err := stream.Send(&proto.CapturePacket{}); err != nil {
		s.clearCaptureIfOwner(sess, engine)
		sess.Stop()
		pw.Close()
		return status.Errorf(codes.Internal, "send initial message: %v", err)
	}

	ctx := stream.Context()
	if d := req.GetDuration(); d != nil {
		if dur := d.AsDuration(); dur > 0 {
			var cancel context.CancelFunc
			ctx, cancel = context.WithTimeout(ctx, dur)
			defer cancel()
		}
	}

	go func() {
		<-ctx.Done()
		s.clearCaptureIfOwner(sess, engine)
		sess.Stop()
		pw.Close()
	}()
	defer pr.Close()

	log.Infof("packet capture started (text=%v, expr=%q)", req.GetTextOutput(), req.GetFilterExpr())
	defer func() {
		stats := sess.Stats()
		log.Infof("packet capture stopped: %d packets, %d bytes, %d dropped",
			stats.Packets, stats.Bytes, stats.Dropped)
	}()

	return streamToGRPC(pr, stream)
}

func streamToGRPC(r io.Reader, stream proto.DaemonService_StartCaptureServer) error {
	buf := make([]byte, 32*1024)
	for {
		n, readErr := r.Read(buf)
		if n > 0 {
			if err := stream.Send(&proto.CapturePacket{Data: buf[:n]}); err != nil {
				log.Debugf("capture stream send: %v", err)
				return nil //nolint:nilerr // client disconnected
			}
		}
		if readErr != nil {
			return nil //nolint:nilerr // pipe closed, capture stopped normally
		}
	}
}
// StartBundleCapture begins capturing packets to a server-side temp file for
// inclusion in the next debug bundle. Not gated by --enable-capture since the
// output stays on the server (same trust level as CPU profiling).
//
// A timeout auto-stops the capture as a safety net if StopBundleCapture is
// never called (e.g. CLI crash).
func (s *Server) StartBundleCapture(_ context.Context, req *proto.StartBundleCaptureRequest) (*proto.StartBundleCaptureResponse, error) {
	s.mutex.Lock()
	defer s.mutex.Unlock()

	s.stopBundleCaptureLocked()
	s.cleanupBundleCapture()

	if s.activeCapture != nil {
		return nil, status.Error(codes.FailedPrecondition, "another capture is already running")
	}

	engine, err := s.getCaptureEngineLocked()
	if err != nil {
		// Not fatal: kernel mode or not connected. Log and return success
		// so the debug bundle still generates without capture data.
		log.Warnf("packet capture unavailable, skipping: %v", err)
		return &proto.StartBundleCaptureResponse{}, nil
	}

	timeout := req.GetTimeout().AsDuration()
	if timeout <= 0 || timeout > maxBundleCaptureDuration {
		timeout = maxBundleCaptureDuration
	}

	f, err := os.CreateTemp("", "netbird.capture.*.pcap")
	if err != nil {
		return nil, status.Errorf(codes.Internal, "create temp file: %v", err)
	}

	sess, err := capture.NewSession(capture.Options{Output: f})
	if err != nil {
		f.Close()
		os.Remove(f.Name())
		return nil, status.Errorf(codes.Internal, "create capture session: %v", err)
	}

	if err := engine.SetCapture(sess); err != nil {
		sess.Stop()
		f.Close()
		os.Remove(f.Name())
		log.Warnf("packet capture unavailable (no filtered device), skipping: %v", err)
		return &proto.StartBundleCaptureResponse{}, nil
	}
	s.activeCapture = sess

	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	bc := &bundleCapture{
		sess:   sess,
		file:   f,
		engine: engine,
		cancel: cancel,
	}

	s.bundleCapture = bc

	go func() {
		<-ctx.Done()
		s.mutex.Lock()
		if s.bundleCapture == bc {
			s.stopBundleCaptureLocked()
		} else {
			bc.stop()
		}
		s.mutex.Unlock()
		log.Infof("bundle capture auto-stopped after timeout")
	}()
	log.Infof("bundle capture started (timeout=%s, file=%s)", timeout, f.Name())

	return &proto.StartBundleCaptureResponse{}, nil
}

// StopBundleCapture stops the running bundle capture. Idempotent.
func (s *Server) StopBundleCapture(_ context.Context, _ *proto.StopBundleCaptureRequest) (*proto.StopBundleCaptureResponse, error) {
	s.mutex.Lock()
	defer s.mutex.Unlock()

	s.stopBundleCaptureLocked()
	return &proto.StopBundleCaptureResponse{}, nil
}

// stopBundleCaptureLocked stops the bundle capture if running. Must hold s.mutex.
func (s *Server) stopBundleCaptureLocked() {
	if s.bundleCapture == nil {
		return
	}
	bc := s.bundleCapture
	if bc.engine != nil && s.activeCapture == bc.sess {
		if err := bc.engine.SetCapture(nil); err != nil {
			log.Debugf("clear bundle capture: %v", err)
		}
		s.activeCapture = nil
	}
	bc.stop()

	stats := bc.sess.Stats()
	log.Infof("bundle capture stopped: %d packets, %d bytes, %d dropped",
		stats.Packets, stats.Bytes, stats.Dropped)
}
// bundleCapturePath stops any running bundle capture and returns its temp
// file path, or "" if no capture has been taken. Called from DebugBundle.
// Must hold s.mutex.
func (s *Server) bundleCapturePath() string {
	if s.bundleCapture == nil {
		return ""
	}

	s.bundleCapture.stop()
	return s.bundleCapture.path()
}
// cleanupBundleCapture removes the temp file and clears state. Must hold s.mutex.
func (s *Server) cleanupBundleCapture() {
	if s.bundleCapture == nil {
		return
	}
	s.bundleCapture.cleanup()
	s.bundleCapture = nil
}

// claimCapture reserves the engine's capture slot for sess. Returns
// FailedPrecondition if another capture is already active.
func (s *Server) claimCapture(sess *capture.Session) (*internal.Engine, error) {
	s.mutex.Lock()
	defer s.mutex.Unlock()

	if s.activeCapture != nil {
		return nil, status.Error(codes.FailedPrecondition, "another capture is already running")
	}
	engine, err := s.getCaptureEngineLocked()
	if err != nil {
		return nil, err
	}
	s.activeCapture = sess
	return engine, nil
}

// releaseCapture clears the active-capture owner if it still matches sess.
func (s *Server) releaseCapture(sess *capture.Session) {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	if s.activeCapture == sess {
		s.activeCapture = nil
	}
}

// clearCaptureIfOwner clears engine's capture slot only if sess still owns it.
func (s *Server) clearCaptureIfOwner(sess *capture.Session, engine *internal.Engine) {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	if s.activeCapture != sess {
		return
	}
	if err := engine.SetCapture(nil); err != nil {
		log.Debugf("clear capture: %v", err)
	}
	s.activeCapture = nil
}

func (s *Server) getCaptureEngineLocked() (*internal.Engine, error) {
	if s.connectClient == nil {
		return nil, status.Error(codes.FailedPrecondition, "client not connected")
	}
	engine := s.connectClient.Engine()
	if engine == nil {
		return nil, status.Error(codes.FailedPrecondition, "engine not initialized")
	}
	return engine, nil
}

// parseCaptureFilter returns a Matcher from the request.
// Returns nil (match all) when no filter expression is set.
func parseCaptureFilter(req *proto.StartCaptureRequest) (capture.Matcher, error) {
	expr := req.GetFilterExpr()
	if expr == "" {
		return nil, nil //nolint:nilnil // nil Matcher means "match all"
	}
	return capture.ParseFilter(expr)
}
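Aside (illustration, not part of this commit): a minimal sketch of how a daemon client could consume the StartCapture stream above. It assumes the standard gRPC-generated proto.NewDaemonServiceClient constructor and an already-dialed connection conn; the first Recv returns the empty acceptance message the handler sends before any packet data.

	// Sketch only: drain the capture stream into a local pcap file.
	client := proto.NewDaemonServiceClient(conn)
	stream, err := client.StartCapture(ctx, &proto.StartCaptureRequest{})
	if err != nil {
		return err
	}
	if _, err := stream.Recv(); err != nil { // empty initial message: capture accepted
		return err
	}
	out, err := os.Create("capture.pcap")
	if err != nil {
		return err
	}
	defer out.Close()
	for {
		pkt, err := stream.Recv()
		if err != nil { // io.EOF once the capture stops
			return nil
		}
		if _, err := out.Write(pkt.Data); err != nil {
			return err
		}
	}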
@@ -3,25 +3,19 @@
package server

import (
	"bytes"
	"context"
	"crypto/sha256"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"os"
	"runtime/pprof"

	log "github.com/sirupsen/logrus"

	"github.com/netbirdio/netbird/client/internal/debug"
	"github.com/netbirdio/netbird/client/proto"
	mgmProto "github.com/netbirdio/netbird/shared/management/proto"
	"github.com/netbirdio/netbird/upload-server/types"
)

const maxBundleUploadSize = 50 * 1024 * 1024

// DebugBundle creates a debug bundle and returns the location.
func (s *Server) DebugBundle(_ context.Context, req *proto.DebugBundleRequest) (resp *proto.DebugBundleResponse, err error) {
	s.mutex.Lock()
@@ -32,16 +26,50 @@ func (s *Server) DebugBundle(_ context.Context, req *proto.DebugBundleRequest) (
		log.Warnf("failed to get latest sync response: %v", err)
	}

	var clientMetrics debug.MetricsExporter
	if s.connectClient != nil {
		if engine := s.connectClient.Engine(); engine != nil {
			if cm := engine.GetClientMetrics(); cm != nil {
				clientMetrics = cm
			}
		}
	}

	var cpuProfileData []byte
	if s.cpuProfileBuf != nil && !s.cpuProfiling {
		cpuProfileData = s.cpuProfileBuf.Bytes()
		defer func() {
			s.cpuProfileBuf = nil
		}()
	}

	capturePath := s.bundleCapturePath()
	defer s.cleanupBundleCapture()

	var refreshStatus func()
	if s.connectClient != nil {
		engine := s.connectClient.Engine()
		if engine != nil {
			refreshStatus = func() {
				log.Debug("refreshing system health status for debug bundle")
				engine.RunHealthProbes(true)
			}
		}
	}

	bundleGenerator := debug.NewBundleGenerator(
		debug.GeneratorDependencies{
			InternalConfig: s.config,
			StatusRecorder: s.statusRecorder,
			SyncResponse:   syncResponse,
-			LogFile:        s.logFile,
+			LogPath:        s.logFile,
			CPUProfile:     cpuProfileData,
			CapturePath:    capturePath,
			RefreshStatus:  refreshStatus,
			ClientMetrics:  clientMetrics,
		},
		debug.BundleConfig{
			Anonymize:         req.GetAnonymize(),
			ClientStatus:      req.GetStatus(),
			IncludeSystemInfo: req.GetSystemInfo(),
			LogFileCount:      req.GetLogFileCount(),
		},
@@ -55,7 +83,7 @@ func (s *Server) DebugBundle(_ context.Context, req *proto.DebugBundleRequest) (
	if req.GetUploadURL() == "" {
		return &proto.DebugBundleResponse{Path: path}, nil
	}
-	key, err := uploadDebugBundle(context.Background(), req.GetUploadURL(), s.config.ManagementURL.String(), path)
+	key, err := debug.UploadDebugBundle(context.Background(), req.GetUploadURL(), s.config.ManagementURL.String(), path)
	if err != nil {
		log.Errorf("failed to upload debug bundle to %s: %v", req.GetUploadURL(), err)
		return &proto.DebugBundleResponse{Path: path, UploadFailureReason: err.Error()}, nil
@@ -66,92 +94,6 @@ func (s *Server) DebugBundle(_ context.Context, req *proto.DebugBundleRequest) (
	return &proto.DebugBundleResponse{Path: path, UploadedKey: key}, nil
}

func uploadDebugBundle(ctx context.Context, url, managementURL, filePath string) (key string, err error) {
	response, err := getUploadURL(ctx, url, managementURL)
	if err != nil {
		return "", err
	}

	err = upload(ctx, filePath, response)
	if err != nil {
		return "", err
	}
	return response.Key, nil
}

func upload(ctx context.Context, filePath string, response *types.GetURLResponse) error {
	fileData, err := os.Open(filePath)
	if err != nil {
		return fmt.Errorf("open file: %w", err)
	}

	defer fileData.Close()

	stat, err := fileData.Stat()
	if err != nil {
		return fmt.Errorf("stat file: %w", err)
	}

	if stat.Size() > maxBundleUploadSize {
		return fmt.Errorf("file size exceeds maximum limit of %d bytes", maxBundleUploadSize)
	}

	req, err := http.NewRequestWithContext(ctx, "PUT", response.URL, fileData)
	if err != nil {
		return fmt.Errorf("create PUT request: %w", err)
	}

	req.ContentLength = stat.Size()
	req.Header.Set("Content-Type", "application/octet-stream")

	putResp, err := http.DefaultClient.Do(req)
	if err != nil {
		return fmt.Errorf("upload failed: %v", err)
	}
	defer putResp.Body.Close()

	if putResp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(putResp.Body)
		return fmt.Errorf("upload status %d: %s", putResp.StatusCode, string(body))
	}
	return nil
}

func getUploadURL(ctx context.Context, url string, managementURL string) (*types.GetURLResponse, error) {
	id := getURLHash(managementURL)
	getReq, err := http.NewRequestWithContext(ctx, "GET", url+"?id="+id, nil)
	if err != nil {
		return nil, fmt.Errorf("create GET request: %w", err)
	}

	getReq.Header.Set(types.ClientHeader, types.ClientHeaderValue)

	resp, err := http.DefaultClient.Do(getReq)
	if err != nil {
		return nil, fmt.Errorf("get presigned URL: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		return nil, fmt.Errorf("get presigned URL status %d: %s", resp.StatusCode, string(body))
	}

	urlBytes, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read response body: %w", err)
	}
	var response types.GetURLResponse
	if err := json.Unmarshal(urlBytes, &response); err != nil {
		return nil, fmt.Errorf("unmarshal response: %w", err)
	}
	return &response, nil
}

func getURLHash(url string) string {
	return fmt.Sprintf("%x", sha256.Sum256([]byte(url)))
}
// GetLogLevel gets the current logging level for the server.
func (s *Server) GetLogLevel(_ context.Context, _ *proto.GetLogLevelRequest) (*proto.GetLogLevelResponse, error) {
	s.mutex.Lock()
@@ -173,20 +115,9 @@ func (s *Server) SetLogLevel(_ context.Context, req *proto.SetLogLevelRequest) (

	log.SetLevel(level)

-	if s.connectClient == nil {
-		return nil, fmt.Errorf("connect client not initialized")
-	}
-
-	engine := s.connectClient.Engine()
-	if engine == nil {
-		return nil, fmt.Errorf("engine not initialized")
-	}
-
-	fwManager := engine.GetFirewallManager()
-	if fwManager == nil {
-		return nil, fmt.Errorf("firewall manager not initialized")
-	}
-
-	fwManager.SetLogLevel(level)
+	if s.connectClient != nil {
+		s.connectClient.SetLogLevel(level)
+	}

	log.Infof("Log level set to %s", level.String())
@@ -215,3 +146,43 @@ func (s *Server) getLatestSyncResponse() (*mgmProto.SyncResponse, error) {

	return cClient.GetLatestSyncResponse()
}

// StartCPUProfile starts CPU profiling in the daemon.
func (s *Server) StartCPUProfile(_ context.Context, _ *proto.StartCPUProfileRequest) (*proto.StartCPUProfileResponse, error) {
	s.mutex.Lock()
	defer s.mutex.Unlock()

	if s.cpuProfiling {
		return nil, fmt.Errorf("CPU profiling already in progress")
	}

	s.cpuProfileBuf = &bytes.Buffer{}
	s.cpuProfiling = true
	if err := pprof.StartCPUProfile(s.cpuProfileBuf); err != nil {
		s.cpuProfileBuf = nil
		s.cpuProfiling = false
		return nil, fmt.Errorf("start CPU profile: %w", err)
	}

	log.Info("CPU profiling started")
	return &proto.StartCPUProfileResponse{}, nil
}

// StopCPUProfile stops CPU profiling in the daemon.
func (s *Server) StopCPUProfile(_ context.Context, _ *proto.StopCPUProfileRequest) (*proto.StopCPUProfileResponse, error) {
	s.mutex.Lock()
	defer s.mutex.Unlock()

	if !s.cpuProfiling {
		return nil, fmt.Errorf("CPU profiling not in progress")
	}

	pprof.StopCPUProfile()
	s.cpuProfiling = false

	if s.cpuProfileBuf != nil {
		log.Infof("CPU profiling stopped, captured %d bytes", s.cpuProfileBuf.Len())
	}

	return &proto.StopCPUProfileResponse{}, nil
}
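Aside (illustration, not part of this commit): the CPU-profile RPCs pair naturally with DebugBundle, which embeds the finished profile as shown above. A sketch, assuming the same generated daemon client as before:

	// Sketch only: profile a workload, then pull a bundle containing the profile.
	if _, err := client.StartCPUProfile(ctx, &proto.StartCPUProfileRequest{}); err != nil {
		return err
	}
	time.Sleep(10 * time.Second) // the workload of interest runs here
	if _, err := client.StopCPUProfile(ctx, &proto.StopCPUProfileRequest{}); err != nil {
		return err
	}
	resp, err := client.DebugBundle(ctx, &proto.DebugBundleRequest{})
	if err != nil {
		return err
	}
	log.Infof("debug bundle written to %s", resp.Path)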
@@ -1,49 +0,0 @@
package server

import (
	"context"
	"errors"
	"net/http"
	"os"
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/netbirdio/netbird/upload-server/server"
	"github.com/netbirdio/netbird/upload-server/types"
)

func TestUpload(t *testing.T) {
	if os.Getenv("DOCKER_CI") == "true" {
		t.Skip("Skipping upload test on docker ci")
	}
	testDir := t.TempDir()
	testURL := "http://localhost:8080"
	t.Setenv("SERVER_URL", testURL)
	t.Setenv("STORE_DIR", testDir)
	srv := server.NewServer()
	go func() {
		if err := srv.Start(); err != nil && !errors.Is(err, http.ErrServerClosed) {
			t.Errorf("Failed to start server: %v", err)
		}
	}()
	t.Cleanup(func() {
		if err := srv.Stop(); err != nil {
			t.Errorf("Failed to stop server: %v", err)
		}
	})

	file := filepath.Join(t.TempDir(), "tmpfile")
	fileContent := []byte("test file content")
	err := os.WriteFile(file, fileContent, 0640)
	require.NoError(t, err)
	key, err := uploadDebugBundle(context.Background(), testURL+types.GetURLPath, testURL, file)
	require.NoError(t, err)
	id := getURLHash(testURL)
	require.Contains(t, key, id+"/")
	expectedFilePath := filepath.Join(testDir, key)
	createdFileContent, err := os.ReadFile(expectedFilePath)
	require.NoError(t, err)
	require.Equal(t, fileContent, createdFileContent)
}
@@ -1,8 +1,6 @@
package server

import (
-	"context"
-
	log "github.com/sirupsen/logrus"

	"github.com/netbirdio/netbird/client/proto"
@@ -16,6 +14,7 @@ func (s *Server) SubscribeEvents(req *proto.SubscribeRequest, stream proto.Daemo
	}()

	log.Debug("client subscribed to events")
+	s.startUpdateManagerForGUI()

	for {
		select {
@@ -29,8 +28,3 @@ func (s *Server) SubscribeEvents(req *proto.SubscribeRequest, stream proto.Daemo
		}
	}
}
-
-func (s *Server) GetEvents(context.Context, *proto.GetEventsRequest) (*proto.GetEventsResponse, error) {
-	events := s.statusRecorder.GetEventHistory()
-	return &proto.GetEventsResponse{Events: events}, nil
-}
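Aside (illustration, not part of this commit): a sketch of consuming the SubscribeEvents stream, assuming the generated daemon client; Recv blocks until the next event or a stream error.

	// Sketch only: log daemon events until the stream or context ends.
	stream, err := client.SubscribeEvents(ctx, &proto.SubscribeRequest{})
	if err != nil {
		return err
	}
	for {
		event, err := stream.Recv()
		if err != nil {
			return err
		}
		log.Infof("daemon event: %v", event)
	}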
@@ -1,77 +0,0 @@
package server

import (
	"context"

	log "github.com/sirupsen/logrus"

	"github.com/netbirdio/netbird/client/internal"
	"github.com/netbirdio/netbird/client/proto"
)

// NotifyOSLifecycle handles operating system lifecycle events by executing appropriate logic based on the request type.
func (s *Server) NotifyOSLifecycle(callerCtx context.Context, req *proto.OSLifecycleRequest) (*proto.OSLifecycleResponse, error) {
	switch req.GetType() {
	case proto.OSLifecycleRequest_WAKEUP:
		return s.handleWakeUp(callerCtx)
	case proto.OSLifecycleRequest_SLEEP:
		return s.handleSleep(callerCtx)
	default:
		log.Errorf("unknown OSLifecycleRequest type: %v", req.GetType())
	}
	return &proto.OSLifecycleResponse{}, nil
}

// handleWakeUp processes a wake-up event by triggering the Up command if the system was previously put to sleep.
// It resets the sleep state and logs the process. Returns a response or an error if the Up command fails.
func (s *Server) handleWakeUp(callerCtx context.Context) (*proto.OSLifecycleResponse, error) {
	if !s.sleepTriggeredDown.Load() {
		log.Info("skipping up because wasn't sleep down")
		return &proto.OSLifecycleResponse{}, nil
	}

	// avoid other wakeup runs if sleep didn't make the computer sleep
	s.sleepTriggeredDown.Store(false)

	log.Info("running up after wake up")
	_, err := s.Up(callerCtx, &proto.UpRequest{})
	if err != nil {
		log.Errorf("running up failed: %v", err)
		return &proto.OSLifecycleResponse{}, err
	}

	log.Info("running up command executed successfully")
	return &proto.OSLifecycleResponse{}, nil
}

// handleSleep handles the sleep event by initiating a "down" sequence if the system is in a connected or connecting state.
func (s *Server) handleSleep(callerCtx context.Context) (*proto.OSLifecycleResponse, error) {
	s.mutex.Lock()

	state := internal.CtxGetState(s.rootCtx)
	status, err := state.Status()
	if err != nil {
		s.mutex.Unlock()
		return &proto.OSLifecycleResponse{}, err
	}

	if status != internal.StatusConnecting && status != internal.StatusConnected {
		log.Infof("skipping setting the agent down because status is %s", status)
		s.mutex.Unlock()
		return &proto.OSLifecycleResponse{}, nil
	}
	s.mutex.Unlock()

	log.Info("running down after system started sleeping")

	_, err = s.Down(callerCtx, &proto.DownRequest{})
	if err != nil {
		log.Errorf("running down failed: %v", err)
		return &proto.OSLifecycleResponse{}, err
	}

	s.sleepTriggeredDown.Store(true)

	log.Info("running down executed successfully")
	return &proto.OSLifecycleResponse{}, nil
}
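Aside (illustration, not part of this commit): the core of this removed file is a one-bit handshake between sleep and wake. Reduced to a standalone sketch (down and up are stand-ins for the real Down/Up handlers; import "sync/atomic" assumed):

	var sleepTriggeredDown atomic.Bool

	func onSleep(connected bool) {
		if !connected {
			return // nothing to take down
		}
		down()
		sleepTriggeredDown.Store(true) // remember that sleep caused the down
	}

	func onWake() {
		if !sleepTriggeredDown.Load() {
			return // the client was down deliberately; stay down
		}
		sleepTriggeredDown.Store(false) // reset first so repeated wake events are no-ops
		up()
	}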
@@ -1,219 +0,0 @@
package server

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/netbirdio/netbird/client/internal"
	"github.com/netbirdio/netbird/client/internal/peer"
	"github.com/netbirdio/netbird/client/proto"
)

func newTestServer() *Server {
	ctx := internal.CtxInitState(context.Background())
	return &Server{
		rootCtx:        ctx,
		statusRecorder: peer.NewRecorder(""),
	}
}

func TestNotifyOSLifecycle_WakeUp_SkipsWhenNotSleepTriggered(t *testing.T) {
	s := newTestServer()

	// sleepTriggeredDown is false by default
	assert.False(t, s.sleepTriggeredDown.Load())

	resp, err := s.NotifyOSLifecycle(context.Background(), &proto.OSLifecycleRequest{
		Type: proto.OSLifecycleRequest_WAKEUP,
	})

	require.NoError(t, err)
	require.NotNil(t, resp)
	assert.False(t, s.sleepTriggeredDown.Load(), "flag should remain false")
}

func TestNotifyOSLifecycle_Sleep_SkipsWhenStatusIdle(t *testing.T) {
	s := newTestServer()

	state := internal.CtxGetState(s.rootCtx)
	state.Set(internal.StatusIdle)

	resp, err := s.NotifyOSLifecycle(context.Background(), &proto.OSLifecycleRequest{
		Type: proto.OSLifecycleRequest_SLEEP,
	})

	require.NoError(t, err)
	require.NotNil(t, resp)
	assert.False(t, s.sleepTriggeredDown.Load(), "flag should remain false when status is Idle")
}

func TestNotifyOSLifecycle_Sleep_SkipsWhenStatusNeedsLogin(t *testing.T) {
	s := newTestServer()

	state := internal.CtxGetState(s.rootCtx)
	state.Set(internal.StatusNeedsLogin)

	resp, err := s.NotifyOSLifecycle(context.Background(), &proto.OSLifecycleRequest{
		Type: proto.OSLifecycleRequest_SLEEP,
	})

	require.NoError(t, err)
	require.NotNil(t, resp)
	assert.False(t, s.sleepTriggeredDown.Load(), "flag should remain false when status is NeedsLogin")
}

func TestNotifyOSLifecycle_Sleep_SetsFlag_WhenConnecting(t *testing.T) {
	s := newTestServer()

	state := internal.CtxGetState(s.rootCtx)
	state.Set(internal.StatusConnecting)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	s.actCancel = cancel

	resp, err := s.NotifyOSLifecycle(ctx, &proto.OSLifecycleRequest{
		Type: proto.OSLifecycleRequest_SLEEP,
	})

	require.NoError(t, err)
	assert.NotNil(t, resp, "handleSleep returns not nil response on success")
	assert.True(t, s.sleepTriggeredDown.Load(), "flag should be set after sleep when connecting")
}

func TestNotifyOSLifecycle_Sleep_SetsFlag_WhenConnected(t *testing.T) {
	s := newTestServer()

	state := internal.CtxGetState(s.rootCtx)
	state.Set(internal.StatusConnected)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	s.actCancel = cancel

	resp, err := s.NotifyOSLifecycle(ctx, &proto.OSLifecycleRequest{
		Type: proto.OSLifecycleRequest_SLEEP,
	})

	require.NoError(t, err)
	assert.NotNil(t, resp, "handleSleep returns not nil response on success")
	assert.True(t, s.sleepTriggeredDown.Load(), "flag should be set after sleep when connected")
}

func TestNotifyOSLifecycle_WakeUp_ResetsFlag(t *testing.T) {
	s := newTestServer()

	// Manually set the flag to simulate prior sleep down
	s.sleepTriggeredDown.Store(true)

	// WakeUp will try to call Up which fails without proper setup, but flag should reset first
	_, _ = s.NotifyOSLifecycle(context.Background(), &proto.OSLifecycleRequest{
		Type: proto.OSLifecycleRequest_WAKEUP,
	})

	assert.False(t, s.sleepTriggeredDown.Load(), "flag should be reset after WakeUp attempt")
}

func TestNotifyOSLifecycle_MultipleWakeUpCalls(t *testing.T) {
	s := newTestServer()

	// First wakeup without prior sleep - should be no-op
	resp, err := s.NotifyOSLifecycle(context.Background(), &proto.OSLifecycleRequest{
		Type: proto.OSLifecycleRequest_WAKEUP,
	})
	require.NoError(t, err)
	require.NotNil(t, resp)
	assert.False(t, s.sleepTriggeredDown.Load())

	// Simulate prior sleep
	s.sleepTriggeredDown.Store(true)

	// First wakeup after sleep - should reset flag
	_, _ = s.NotifyOSLifecycle(context.Background(), &proto.OSLifecycleRequest{
		Type: proto.OSLifecycleRequest_WAKEUP,
	})
	assert.False(t, s.sleepTriggeredDown.Load())

	// Second wakeup - should be no-op
	resp, err = s.NotifyOSLifecycle(context.Background(), &proto.OSLifecycleRequest{
		Type: proto.OSLifecycleRequest_WAKEUP,
	})
	require.NoError(t, err)
	require.NotNil(t, resp)
	assert.False(t, s.sleepTriggeredDown.Load())
}

func TestHandleWakeUp_SkipsWhenFlagFalse(t *testing.T) {
	s := newTestServer()

	resp, err := s.handleWakeUp(context.Background())

	require.NoError(t, err)
	require.NotNil(t, resp)
}

func TestHandleWakeUp_ResetsFlagBeforeUp(t *testing.T) {
	s := newTestServer()
	s.sleepTriggeredDown.Store(true)

	// Even if Up fails, flag should be reset
	_, _ = s.handleWakeUp(context.Background())

	assert.False(t, s.sleepTriggeredDown.Load(), "flag must be reset before calling Up")
}

func TestHandleSleep_SkipsForNonActiveStates(t *testing.T) {
	tests := []struct {
		name   string
		status internal.StatusType
	}{
		{"Idle", internal.StatusIdle},
		{"NeedsLogin", internal.StatusNeedsLogin},
		{"LoginFailed", internal.StatusLoginFailed},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			s := newTestServer()
			state := internal.CtxGetState(s.rootCtx)
			state.Set(tt.status)

			resp, err := s.handleSleep(context.Background())

			require.NoError(t, err)
			require.NotNil(t, resp)
			assert.False(t, s.sleepTriggeredDown.Load())
		})
	}
}

func TestHandleSleep_ProceedsForActiveStates(t *testing.T) {
	tests := []struct {
		name   string
		status internal.StatusType
	}{
		{"Connecting", internal.StatusConnecting},
		{"Connected", internal.StatusConnected},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			s := newTestServer()
			state := internal.CtxGetState(s.rootCtx)
			state.Set(tt.status)

			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()
			s.actCancel = cancel

			resp, err := s.handleSleep(ctx)

			require.NoError(t, err)
			assert.NotNil(t, resp)
			assert.True(t, s.sleepTriggeredDown.Load())
		})
	}
}
@@ -9,6 +9,8 @@ import (
	"strings"

	"golang.org/x/exp/maps"
+	"google.golang.org/grpc/codes"
+	gstatus "google.golang.org/grpc/status"

	"github.com/netbirdio/netbird/client/proto"
	"github.com/netbirdio/netbird/route"
@@ -27,6 +29,10 @@ func (s *Server) ListNetworks(context.Context, *proto.ListNetworksRequest) (*pro
	s.mutex.Lock()
	defer s.mutex.Unlock()

+	if s.networksDisabled {
+		return nil, gstatus.Errorf(codes.Unavailable, errNetworksDisabled)
+	}
+
	if s.connectClient == nil {
		return nil, fmt.Errorf("not connected")
	}
@@ -118,6 +124,10 @@ func (s *Server) SelectNetworks(_ context.Context, req *proto.SelectNetworksRequ
	s.mutex.Lock()
	defer s.mutex.Unlock()

+	if s.networksDisabled {
+		return nil, gstatus.Errorf(codes.Unavailable, errNetworksDisabled)
+	}
+
	if s.connectClient == nil {
		return nil, fmt.Errorf("not connected")
	}
@@ -164,6 +174,10 @@ func (s *Server) DeselectNetworks(_ context.Context, req *proto.SelectNetworksRe
	s.mutex.Lock()
	defer s.mutex.Unlock()

+	if s.networksDisabled {
+		return nil, gstatus.Errorf(codes.Unavailable, errNetworksDisabled)
+	}
+
	if s.connectClient == nil {
		return nil, fmt.Errorf("not connected")
	}
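Aside (illustration, not part of this commit): a front end can honor the new gate via GetFeatures before offering network selection at all, instead of waiting for the Unavailable error above. Sketch, assuming the generated daemon client:

	features, err := client.GetFeatures(ctx, &proto.GetFeaturesRequest{})
	if err != nil {
		return err
	}
	if features.DisableNetworks {
		return errors.New("network selection is disabled by the administrator")
	}
	networks, err := client.ListNetworks(ctx, &proto.ListNetworksRequest{})
	if err != nil {
		return err
	}
	log.Infof("selectable networks: %v", networks)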
@@ -1,5 +1,4 @@
 //go:build windows
-// +build windows

package server

@@ -1,6 +1,7 @@
package server

import (
+	"bytes"
	"context"
	"errors"
	"fmt"
@@ -13,25 +14,26 @@ import (
	"time"

	"github.com/cenkalti/backoff/v4"
	"golang.org/x/exp/maps"
	"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
	"google.golang.org/protobuf/types/known/durationpb"

	log "github.com/sirupsen/logrus"
	"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
	gstatus "google.golang.org/grpc/status"
	"google.golang.org/protobuf/types/known/timestamppb"

	"github.com/netbirdio/netbird/client/internal/auth"
	"github.com/netbirdio/netbird/client/internal/expose"
	"github.com/netbirdio/netbird/client/internal/profilemanager"
	sleephandler "github.com/netbirdio/netbird/client/internal/sleep/handler"
	"github.com/netbirdio/netbird/client/system"
	mgm "github.com/netbirdio/netbird/shared/management/client"
	"github.com/netbirdio/netbird/shared/management/domain"

	"github.com/netbirdio/netbird/client/internal"
	"github.com/netbirdio/netbird/client/internal/peer"
	"github.com/netbirdio/netbird/client/internal/statemanager"
	"github.com/netbirdio/netbird/client/internal/updater"
	"github.com/netbirdio/netbird/client/proto"
	"github.com/netbirdio/netbird/util/capture"
	"github.com/netbirdio/netbird/version"
)
@@ -52,6 +54,7 @@ const (
	errRestoreResidualState   = "failed to restore residual state: %v"
	errProfilesDisabled       = "profiles are disabled, you cannot use this feature without profiles enabled"
	errUpdateSettingsDisabled = "update settings are disabled, you cannot use this feature without update settings enabled"
+	errNetworksDisabled       = "network selection is disabled by the administrator"
)

var ErrServiceNotUp = errors.New("service is not up")
@@ -70,7 +73,7 @@ type Server struct {
	proto.UnimplementedDaemonServiceServer
	clientRunning     bool // protected by mutex
	clientRunningChan chan struct{}
-	clientGiveUpChan  chan struct{}
+	clientGiveUpChan  chan struct{} // closed when connectWithRetryRuns goroutine exits

	connectClient *internal.ConnectClient
@@ -81,12 +84,21 @@ type Server struct {
	persistSyncResponse bool
	isSessionActive     atomic.Bool

	cpuProfileBuf *bytes.Buffer
	cpuProfiling  bool

	profileManager         *profilemanager.ServiceManager
	profilesDisabled       bool
	updateSettingsDisabled bool
+	captureEnabled         bool
+	bundleCapture          *bundleCapture
+	// activeCapture is the session currently installed on the engine; guarded by s.mutex.
+	activeCapture    *capture.Session
+	networksDisabled bool

	// sleepTriggeredDown holds a state indicating whether the sleep handler triggered the last client down
	sleepTriggeredDown atomic.Bool
	sleepHandler       *sleephandler.SleepHandler

	updateManager *updater.Manager

	jwtCache *jwtCache
}
@@ -99,8 +111,8 @@ type oauthAuthFlow struct {
}

// New server instance constructor.
-func New(ctx context.Context, logFile string, configFile string, profilesDisabled bool, updateSettingsDisabled bool) *Server {
-	return &Server{
+func New(ctx context.Context, logFile string, configFile string, profilesDisabled bool, updateSettingsDisabled bool, captureEnabled bool, networksDisabled bool) *Server {
+	s := &Server{
		rootCtx:             ctx,
		logFile:             logFile,
		persistSyncResponse: true,
@@ -108,8 +120,15 @@ func New(ctx context.Context, logFile string, configFile string, profilesDisable
		profileManager:         profilemanager.NewServiceManager(configFile),
		profilesDisabled:       profilesDisabled,
		updateSettingsDisabled: updateSettingsDisabled,
+		captureEnabled:         captureEnabled,
+		networksDisabled:       networksDisabled,
		jwtCache:               newJWTCache(),
	}
+	agent := &serverAgent{s}
+	s.sleepHandler = sleephandler.New(agent)
+	s.startSleepDetector()
+
+	return s
}

func (s *Server) Start() error {
@@ -130,6 +149,12 @@ func (s *Server) Start() error {
		log.Warnf(errRestoreResidualState, err)
	}

+	if s.updateManager == nil {
+		stateMgr := statemanager.New(s.profileManager.GetStatePath())
+		s.updateManager = updater.NewManager(s.statusRecorder, stateMgr)
+		s.updateManager.CheckUpdateSuccess(s.rootCtx)
+	}
+
	// if current state contains any error, return it
	// in all other cases we can continue execution only if status is idle and up command was
	// not in the progress or already successfully established connection.
@@ -145,10 +170,10 @@ func (s *Server) Start() error {
	ctx, cancel := context.WithCancel(s.rootCtx)
	s.actCancel = cancel

-	// set the default config if not exists
-	if err := s.setDefaultConfigIfNotExists(ctx); err != nil {
-		log.Errorf("failed to set default config: %v", err)
-		return fmt.Errorf("failed to set default config: %w", err)
+	// copy old default config
+	_, err = s.profileManager.CopyDefaultProfileIfNotExists()
+	if err != nil && !errors.Is(err, profilemanager.ErrorOldDefaultConfigNotFound) {
+		return err
	}

	activeProf, err := s.profileManager.GetActiveProfileState()
@@ -156,23 +181,11 @@ func (s *Server) Start() error {
		return fmt.Errorf("failed to get active profile state: %w", err)
	}

-	config, err := s.getConfig(activeProf)
+	config, existingConfig, err := s.getConfig(activeProf)
	if err != nil {
		log.Errorf("failed to get active profile config: %v", err)
-
-		if err := s.profileManager.SetActiveProfileState(&profilemanager.ActiveProfileState{
-			Name:     "default",
-			Username: "",
-		}); err != nil {
-			log.Errorf("failed to set active profile state: %v", err)
-			return fmt.Errorf("failed to set active profile state: %w", err)
-		}
-
-		config, err = profilemanager.GetConfig(s.profileManager.DefaultProfilePath())
-		if err != nil {
-			log.Errorf("failed to get default profile config: %v", err)
-			return fmt.Errorf("failed to get default profile config: %w", err)
-		}
+		return err
	}
	s.config = config
@@ -186,44 +199,27 @@ func (s *Server) Start() error {
	}

	if config.DisableAutoConnect {
		state.Set(internal.StatusIdle)
		return nil
	}

+	if !existingConfig {
+		log.Warnf("not trying to connect when configuration was just created")
+		state.Set(internal.StatusNeedsLogin)
+		return nil
+	}
+
	s.clientRunning = true
	s.clientRunningChan = make(chan struct{})
	s.clientGiveUpChan = make(chan struct{})
-	go s.connectWithRetryRuns(ctx, config, s.statusRecorder, false, s.clientRunningChan, s.clientGiveUpChan)
-	return nil
-}
-
-func (s *Server) setDefaultConfigIfNotExists(ctx context.Context) error {
-	ok, err := s.profileManager.CopyDefaultProfileIfNotExists()
-	if err != nil {
-		if err := s.profileManager.CreateDefaultProfile(); err != nil {
-			log.Errorf("failed to create default profile: %v", err)
-			return fmt.Errorf("failed to create default profile: %w", err)
-		}
-
-		if err := s.profileManager.SetActiveProfileState(&profilemanager.ActiveProfileState{
-			Name:     "default",
-			Username: "",
-		}); err != nil {
-			log.Errorf("failed to set active profile state: %v", err)
-			return fmt.Errorf("failed to set active profile state: %w", err)
-		}
-	}
-	if ok {
-		state := internal.CtxGetState(ctx)
-		state.Set(internal.StatusNeedsLogin)
-	}
+	go s.connectWithRetryRuns(ctx, config, s.statusRecorder, s.clientRunningChan, s.clientGiveUpChan)
	return nil
}
// connectWithRetryRuns runs the client connection with a backoff strategy where we retry the operation as an additional
// mechanism to keep the client connected even when the connection is lost.
// we cancel the retry if the client receives a stop or down command, or if disable auto connect is configured.
-func (s *Server) connectWithRetryRuns(ctx context.Context, profileConfig *profilemanager.Config, statusRecorder *peer.Status, doInitialAutoUpdate bool, runningChan chan struct{}, giveUpChan chan struct{}) {
+func (s *Server) connectWithRetryRuns(ctx context.Context, profileConfig *profilemanager.Config, statusRecorder *peer.Status, runningChan chan struct{}, giveUpChan chan struct{}) {
	defer func() {
		s.mutex.Lock()
		s.clientRunning = false
@@ -231,7 +227,7 @@ func (s *Server) connectWithRetryRuns(ctx context.Context, profileConfig *profil
	}()

	if s.config.DisableAutoConnect {
-		if err := s.connect(ctx, s.config, s.statusRecorder, doInitialAutoUpdate, runningChan); err != nil {
+		if err := s.connect(ctx, s.config, s.statusRecorder, runningChan); err != nil {
			log.Debugf("run client connection exited with error: %v", err)
		}
		log.Tracef("client connection exited")
@@ -260,8 +256,7 @@ func (s *Server) connectWithRetryRuns(ctx context.Context, profileConfig *profil
	}()

	runOperation := func() error {
-		err := s.connect(ctx, profileConfig, statusRecorder, doInitialAutoUpdate, runningChan)
-		doInitialAutoUpdate = false
+		err := s.connect(ctx, profileConfig, statusRecorder, runningChan)
		if err != nil {
			log.Debugf("run client connection exited with error: %v. Will retry in the background", err)
			return err
@@ -282,10 +277,17 @@ func (s *Server) connectWithRetryRuns(ctx context.Context, profileConfig *profil

// loginAttempt attempts to login using the provided information. it returns a status in case something fails
func (s *Server) loginAttempt(ctx context.Context, setupKey, jwtToken string) (internal.StatusType, error) {
-	var status internal.StatusType
-	err := internal.Login(ctx, s.config, setupKey, jwtToken)
+	authClient, err := auth.NewAuth(ctx, s.config.PrivateKey, s.config.ManagementURL, s.config)
	if err != nil {
-		if s, ok := gstatus.FromError(err); ok && (s.Code() == codes.InvalidArgument || s.Code() == codes.PermissionDenied) {
+		log.Errorf("failed to create auth client: %v", err)
+		return internal.StatusLoginFailed, err
+	}
+	defer authClient.Close()
+
+	var status internal.StatusType
+	err, isAuthError := authClient.Login(ctx, setupKey, jwtToken)
+	if err != nil {
+		if isAuthError {
			log.Warnf("failed login: %v", err)
			status = internal.StatusNeedsLogin
		} else {
@@ -487,7 +489,7 @@ func (s *Server) Login(callerCtx context.Context, msg *proto.LoginRequest) (*pro

	s.mutex.Unlock()

-	config, err := s.getConfig(activeProf)
+	config, _, err := s.getConfig(activeProf)
	if err != nil {
		log.Errorf("failed to get active profile config: %v", err)
		return nil, fmt.Errorf("failed to get active profile config: %w", err)
@@ -610,8 +612,7 @@ func (s *Server) WaitSSOLogin(callerCtx context.Context, msg *proto.WaitSSOLogin
		s.oauthAuthFlow.waitCancel()
	}

-	waitTimeout := time.Until(s.oauthAuthFlow.expiresAt)
-	waitCTX, cancel := context.WithTimeout(ctx, waitTimeout)
+	waitCTX, cancel := context.WithCancel(ctx)
	defer cancel()

	s.mutex.Lock()
@@ -659,8 +660,6 @@ func (s *Server) Up(callerCtx context.Context, msg *proto.UpRequest) (*proto.UpR

		return s.waitForUp(callerCtx)
	}
-	defer s.mutex.Unlock()

	if err := restoreResidualState(callerCtx, s.profileManager.GetStatePath()); err != nil {
		log.Warnf(errRestoreResidualState, err)
	}
@@ -672,10 +671,12 @@ func (s *Server) Up(callerCtx context.Context, msg *proto.UpRequest) (*proto.UpR
	// not in the progress or already successfully established connection.
	status, err := state.Status()
	if err != nil {
+		s.mutex.Unlock()
		return nil, err
	}

	if status != internal.StatusIdle {
+		s.mutex.Unlock()
		return nil, fmt.Errorf("up already in progress: current status %s", status)
	}
@@ -692,17 +693,20 @@ func (s *Server) Up(callerCtx context.Context, msg *proto.UpRequest) (*proto.UpR
	s.actCancel = cancel

	if s.config == nil {
+		s.mutex.Unlock()
		return nil, fmt.Errorf("config is not defined, please call login command first")
	}

	activeProf, err := s.profileManager.GetActiveProfileState()
	if err != nil {
+		s.mutex.Unlock()
		log.Errorf("failed to get active profile state: %v", err)
		return nil, fmt.Errorf("failed to get active profile state: %w", err)
	}

	if msg != nil && msg.ProfileName != nil {
		if err := s.switchProfileIfNeeded(*msg.ProfileName, msg.Username, activeProf); err != nil {
+			s.mutex.Unlock()
			log.Errorf("failed to switch profile: %v", err)
			return nil, fmt.Errorf("failed to switch profile: %w", err)
		}
@@ -710,14 +714,16 @@ func (s *Server) Up(callerCtx context.Context, msg *proto.UpRequest) (*proto.UpR

	activeProf, err = s.profileManager.GetActiveProfileState()
	if err != nil {
+		s.mutex.Unlock()
		log.Errorf("failed to get active profile state: %v", err)
		return nil, fmt.Errorf("failed to get active profile state: %w", err)
	}

	log.Infof("active profile: %s for %s", activeProf.Name, activeProf.Username)

-	config, err := s.getConfig(activeProf)
+	config, _, err := s.getConfig(activeProf)
	if err != nil {
+		s.mutex.Unlock()
		log.Errorf("failed to get active profile config: %v", err)
		return nil, fmt.Errorf("failed to get active profile config: %w", err)
	}
@@ -730,12 +736,9 @@ func (s *Server) Up(callerCtx context.Context, msg *proto.UpRequest) (*proto.UpR
	s.clientRunningChan = make(chan struct{})
	s.clientGiveUpChan = make(chan struct{})

-	var doAutoUpdate bool
-	if msg != nil && msg.AutoUpdate != nil && *msg.AutoUpdate {
-		doAutoUpdate = true
-	}
-	go s.connectWithRetryRuns(ctx, s.config, s.statusRecorder, doAutoUpdate, s.clientRunningChan, s.clientGiveUpChan)
+	go s.connectWithRetryRuns(ctx, s.config, s.statusRecorder, s.clientRunningChan, s.clientGiveUpChan)

+	s.mutex.Unlock()
	return s.waitForUp(callerCtx)
}
@@ -811,7 +814,7 @@ func (s *Server) SwitchProfile(callerCtx context.Context, msg *proto.SwitchProfi
		log.Errorf("failed to get active profile state: %v", err)
		return nil, fmt.Errorf("failed to get active profile state: %w", err)
	}
-	config, err := s.getConfig(activeProf)
+	config, _, err := s.getConfig(activeProf)
	if err != nil {
		log.Errorf("failed to get default profile config: %v", err)
		return nil, fmt.Errorf("failed to get default profile config: %w", err)
@@ -825,9 +828,11 @@ func (s *Server) SwitchProfile(callerCtx context.Context, msg *proto.SwitchProfi
// Down engine work in the daemon.
func (s *Server) Down(ctx context.Context, _ *proto.DownRequest) (*proto.DownResponse, error) {
	s.mutex.Lock()
-	defer s.mutex.Unlock()

+	giveUpChan := s.clientGiveUpChan
+
	if err := s.cleanupConnection(); err != nil {
+		s.mutex.Unlock()
		// todo review to update the status in case any type of error
		log.Errorf("failed to shut down properly: %v", err)
		return nil, err
@@ -836,6 +841,20 @@ func (s *Server) Down(ctx context.Context, _ *proto.DownRequest) (*proto.DownRes
	state := internal.CtxGetState(s.rootCtx)
	state.Set(internal.StatusIdle)

+	s.mutex.Unlock()
+
+	// Wait for the connectWithRetryRuns goroutine to finish with a short timeout.
+	// This prevents the goroutine from setting ErrResetConnection after Down() returns.
+	// The giveUpChan is closed at the end of connectWithRetryRuns.
+	if giveUpChan != nil {
+		select {
+		case <-giveUpChan:
+			log.Debugf("client goroutine finished successfully")
+		case <-time.After(5 * time.Second):
+			log.Warnf("timeout waiting for client goroutine to finish, proceeding anyway")
+		}
+	}
+
	return &proto.DownResponse{}, nil
}
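Aside (illustration, not part of this commit): the giveUpChan wait above is a close-then-select handshake. In isolation, with runWithRetries as a stand-in for connectWithRetryRuns:

	giveUp := make(chan struct{})
	go func() {
		defer close(giveUp) // closed exactly once, when the goroutine exits
		runWithRetries()
	}()

	// Down: bound the wait so shutdown cannot block forever.
	select {
	case <-giveUp:
		// goroutine finished; no late state writes can follow
	case <-time.After(5 * time.Second):
		// proceed anyway
	}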
@@ -845,14 +864,26 @@ func (s *Server) cleanupConnection() error {
	if s.actCancel == nil {
		return ErrServiceNotUp
	}

+	// Capture the engine reference before cancelling the context.
+	// After actCancel(), the connectWithRetryRuns goroutine wakes up
+	// and sets connectClient.engine = nil, causing connectClient.Stop()
+	// to skip the engine shutdown entirely.
+	var engine *internal.Engine
+	if s.connectClient != nil {
+		engine = s.connectClient.Engine()
+	}
+
	s.actCancel()

	if s.connectClient == nil {
		return nil
	}

-	if err := s.connectClient.Stop(); err != nil {
-		return err
+	if engine != nil {
+		if err := engine.Stop(); err != nil {
+			return err
+		}
	}

	s.connectClient = nil
@@ -908,7 +939,7 @@ func (s *Server) handleActiveProfileLogout(ctx context.Context) (*proto.LogoutRe
		return nil, gstatus.Errorf(codes.FailedPrecondition, "failed to get active profile state: %v", err)
	}

-	config, err := s.getConfig(activeProf)
+	config, _, err := s.getConfig(activeProf)
	if err != nil {
		return nil, gstatus.Errorf(codes.FailedPrecondition, "not logged in")
	}
@@ -932,19 +963,24 @@ func (s *Server) handleActiveProfileLogout(ctx context.Context) (*proto.LogoutRe
	return &proto.LogoutResponse{}, nil
}

-// getConfig loads the config from the active profile
-func (s *Server) getConfig(activeProf *profilemanager.ActiveProfileState) (*profilemanager.Config, error) {
+// getConfig reads the config file and returns the Config plus whether the config file already existed. Errors out if it does not exist.
+func (s *Server) getConfig(activeProf *profilemanager.ActiveProfileState) (*profilemanager.Config, bool, error) {
	cfgPath, err := activeProf.FilePath()
	if err != nil {
-		return nil, fmt.Errorf("failed to get active profile file path: %w", err)
+		return nil, false, fmt.Errorf("failed to get active profile file path: %w", err)
	}

-	config, err := profilemanager.GetConfig(cfgPath)
+	_, err = os.Stat(cfgPath)
+	configExisted := !os.IsNotExist(err)
+
+	log.Infof("active profile config existed: %t, err %v", configExisted, err)
+
+	config, err := profilemanager.ReadConfig(cfgPath)
	if err != nil {
-		return nil, fmt.Errorf("failed to get config: %w", err)
+		return nil, false, fmt.Errorf("failed to get config: %w", err)
	}

-	return config, nil
+	return config, configExisted, nil
}

func (s *Server) canRemoveProfile(profileName string) error {
@@ -1091,11 +1127,9 @@ func (s *Server) Status(
	if msg.GetFullPeerStatus {
		s.runProbes(msg.ShouldRunProbes)
		fullStatus := s.statusRecorder.GetFullStatus()
-		pbFullStatus := toProtoFullStatus(fullStatus)
+		pbFullStatus := fullStatus.ToProto()
		pbFullStatus.Events = s.statusRecorder.GetEventHistory()

		pbFullStatus.SshServerState = s.getSSHServerState()

		statusResponse.FullStatus = pbFullStatus
	}
@@ -1128,6 +1162,7 @@ func (s *Server) getSSHServerState() *proto.SSHServerState {
			RemoteAddress: session.RemoteAddress,
			Command:       session.Command,
			JwtUsername:   session.JWTUsername,
+			PortForwards:  session.PortForwards,
		})
	}
@@ -1315,6 +1350,65 @@ func (s *Server) WaitJWTToken(
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ExposeService exposes a local port via the NetBird reverse proxy.
func (s *Server) ExposeService(req *proto.ExposeServiceRequest, srv proto.DaemonService_ExposeServiceServer) error {
    s.mutex.Lock()
    if !s.clientRunning {
        s.mutex.Unlock()
        return gstatus.Errorf(codes.FailedPrecondition, "client is not running, run 'netbird up' first")
    }
    connectClient := s.connectClient
    s.mutex.Unlock()

    if connectClient == nil {
        return gstatus.Errorf(codes.FailedPrecondition, "client not initialized")
    }

    engine := connectClient.Engine()
    if engine == nil {
        return gstatus.Errorf(codes.FailedPrecondition, "engine not initialized")
    }

    if engine.IsBlockInbound() {
        return gstatus.Errorf(codes.FailedPrecondition, "expose requires inbound connections but 'block inbound' is enabled, disable it first")
    }

    mgr := engine.GetExposeManager()
    if mgr == nil {
        return gstatus.Errorf(codes.Internal, "expose manager not available")
    }

    ctx := srv.Context()

    exposeCtx, exposeCancel := context.WithTimeout(ctx, 30*time.Second)
    defer exposeCancel()

    mgmReq := expose.NewRequest(req)
    result, err := mgr.Expose(exposeCtx, *mgmReq)
    if err != nil {
        return err
    }

    if err := srv.Send(&proto.ExposeServiceEvent{
        Event: &proto.ExposeServiceEvent_Ready{
            Ready: &proto.ExposeServiceReady{
                ServiceName:      result.ServiceName,
                ServiceUrl:       result.ServiceURL,
                Domain:           result.Domain,
                PortAutoAssigned: result.PortAutoAssigned,
            },
        },
    }); err != nil {
        return err
    }

    err = mgr.KeepAlive(ctx, result.Domain)
    if err != nil {
        return err
    }
    return nil
}

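Note the control flow: the server sends exactly one Ready event and then blocks in KeepAlive until the stream context ends, so the service stays exposed for as long as the caller keeps the stream open. A hypothetical client-side consumer, sketched under the assumption that conn is a gRPC connection to the daemon and that the generated constructor follows the usual NewDaemonServiceClient naming (request fields are placeholders, see the proto definition):

    client := proto.NewDaemonServiceClient(conn)
    stream, err := client.ExposeService(ctx, &proto.ExposeServiceRequest{
        // local port, protocol, etc. (fields assumed for illustration)
    })
    if err != nil {
        return err
    }
    for {
        event, err := stream.Recv()
        if err != nil {
            return err // the stream ends when the server-side KeepAlive returns
        }
        if ready := event.GetReady(); ready != nil {
            fmt.Printf("exposed as %s (domain %s)\n", ready.GetServiceUrl(), ready.GetDomain())
        }
    }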
func isUnixRunningDesktop() bool {
    if runtime.GOOS != "linux" && runtime.GOOS != "freebsd" {
        return false
@@ -1336,6 +1430,10 @@ func (s *Server) runProbes(waitForProbeResult bool) {
        if engine.RunHealthProbes(waitForProbeResult) {
            s.lastProbe = time.Now()
        }
    } else {
        if err := s.statusRecorder.RefreshWireGuardStats(); err != nil {
            log.Debugf("failed to refresh WireGuard stats: %v", err)
        }
    }
}

@@ -1542,16 +1640,23 @@ func (s *Server) GetFeatures(ctx context.Context, msg *proto.GetFeaturesRequest)
    features := &proto.GetFeaturesResponse{
        DisableProfiles:       s.checkProfilesDisabled(),
        DisableUpdateSettings: s.checkUpdateSettingsDisabled(),
        DisableNetworks:       s.networksDisabled,
    }

    return features, nil
}

func (s *Server) connect(ctx context.Context, config *profilemanager.Config, statusRecorder *peer.Status, doInitialAutoUpdate bool, runningChan chan struct{}) error {
func (s *Server) connect(ctx context.Context, config *profilemanager.Config, statusRecorder *peer.Status, runningChan chan struct{}) error {
    log.Tracef("running client connection")
    s.connectClient = internal.NewConnectClient(ctx, config, statusRecorder, doInitialAutoUpdate)
    s.connectClient.SetSyncResponsePersistence(s.persistSyncResponse)
    if err := s.connectClient.Run(runningChan); err != nil {
    client := internal.NewConnectClient(ctx, config, statusRecorder)
    client.SetUpdateManager(s.updateManager)
    client.SetSyncResponsePersistence(s.persistSyncResponse)

    s.mutex.Lock()
    s.connectClient = client
    s.mutex.Unlock()

    if err := client.Run(runningChan, s.logFile); err != nil {
        return err
    }
    return nil
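Two details of the rework matter for the tests added further down: the client is fully configured (update manager, sync-response persistence) before it is published, and the publication itself happens under s.mutex. A minimal standalone sketch of this publish-under-lock pattern, with illustrative names rather than netbird types:

    package main

    import (
        "fmt"
        "sync"
    )

    type connectClient struct{ configured bool } // stand-in for the real client

    type server struct {
        mu     sync.Mutex
        client *connectClient
    }

    // publish configures the client first and only then makes it visible
    // under the mutex: concurrent readers observe either nil or a fully
    // configured client, never a half-initialized one.
    func (s *server) publish() {
        c := &connectClient{configured: true}
        s.mu.Lock()
        s.client = c
        s.mu.Unlock()
    }

    func (s *server) snapshot() *connectClient {
        s.mu.Lock()
        defer s.mu.Unlock()
        return s.client
    }

    func main() {
        s := &server{}
        go s.publish()
        fmt.Println(s.snapshot()) // nil or a configured client; never a torn read
    }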
@@ -1575,6 +1680,14 @@ func (s *Server) checkUpdateSettingsDisabled() bool {
    return false
}

func (s *Server) startUpdateManagerForGUI() {
    if s.updateManager == nil {
        return
    }
    s.updateManager.Start(s.rootCtx)
    s.updateManager.NotifyUI()
}

func (s *Server) onSessionExpire() {
    if runtime.GOOS != "windows" {
        isUIActive := internal.CheckUIApp()
@@ -1625,94 +1738,6 @@ func parseEnvDuration(envVar string, defaultDuration time.Duration) time.Duratio
    return defaultDuration
}

func toProtoFullStatus(fullStatus peer.FullStatus) *proto.FullStatus {
    pbFullStatus := proto.FullStatus{
        ManagementState: &proto.ManagementState{},
        SignalState:     &proto.SignalState{},
        LocalPeerState:  &proto.LocalPeerState{},
        Peers:           []*proto.PeerState{},
    }

    pbFullStatus.ManagementState.URL = fullStatus.ManagementState.URL
    pbFullStatus.ManagementState.Connected = fullStatus.ManagementState.Connected
    if err := fullStatus.ManagementState.Error; err != nil {
        pbFullStatus.ManagementState.Error = err.Error()
    }

    pbFullStatus.SignalState.URL = fullStatus.SignalState.URL
    pbFullStatus.SignalState.Connected = fullStatus.SignalState.Connected
    if err := fullStatus.SignalState.Error; err != nil {
        pbFullStatus.SignalState.Error = err.Error()
    }

    pbFullStatus.LocalPeerState.IP = fullStatus.LocalPeerState.IP
    pbFullStatus.LocalPeerState.PubKey = fullStatus.LocalPeerState.PubKey
    pbFullStatus.LocalPeerState.KernelInterface = fullStatus.LocalPeerState.KernelInterface
    pbFullStatus.LocalPeerState.Fqdn = fullStatus.LocalPeerState.FQDN
    pbFullStatus.LocalPeerState.RosenpassPermissive = fullStatus.RosenpassState.Permissive
    pbFullStatus.LocalPeerState.RosenpassEnabled = fullStatus.RosenpassState.Enabled
    pbFullStatus.LocalPeerState.Networks = maps.Keys(fullStatus.LocalPeerState.Routes)
    pbFullStatus.NumberOfForwardingRules = int32(fullStatus.NumOfForwardingRules)
    pbFullStatus.LazyConnectionEnabled = fullStatus.LazyConnectionEnabled

    for _, peerState := range fullStatus.Peers {
        pbPeerState := &proto.PeerState{
            IP:                         peerState.IP,
            PubKey:                     peerState.PubKey,
            ConnStatus:                 peerState.ConnStatus.String(),
            ConnStatusUpdate:           timestamppb.New(peerState.ConnStatusUpdate),
            Relayed:                    peerState.Relayed,
            LocalIceCandidateType:      peerState.LocalIceCandidateType,
            RemoteIceCandidateType:     peerState.RemoteIceCandidateType,
            LocalIceCandidateEndpoint:  peerState.LocalIceCandidateEndpoint,
            RemoteIceCandidateEndpoint: peerState.RemoteIceCandidateEndpoint,
            RelayAddress:               peerState.RelayServerAddress,
            Fqdn:                       peerState.FQDN,
            LastWireguardHandshake:     timestamppb.New(peerState.LastWireguardHandshake),
            BytesRx:                    peerState.BytesRx,
            BytesTx:                    peerState.BytesTx,
            RosenpassEnabled:           peerState.RosenpassEnabled,
            Networks:                   maps.Keys(peerState.GetRoutes()),
            Latency:                    durationpb.New(peerState.Latency),
            SshHostKey:                 peerState.SSHHostKey,
        }
        pbFullStatus.Peers = append(pbFullStatus.Peers, pbPeerState)
    }

    for _, relayState := range fullStatus.Relays {
        pbRelayState := &proto.RelayState{
            URI:       relayState.URI,
            Available: relayState.Err == nil,
        }
        if err := relayState.Err; err != nil {
            pbRelayState.Error = err.Error()
        }
        pbFullStatus.Relays = append(pbFullStatus.Relays, pbRelayState)
    }

    for _, dnsState := range fullStatus.NSGroupStates {
        var err string
        if dnsState.Error != nil {
            err = dnsState.Error.Error()
        }

        var servers []string
        for _, server := range dnsState.Servers {
            servers = append(servers, server.String())
        }

        pbDnsState := &proto.NSGroupState{
            Servers: servers,
            Domains: dnsState.Domains,
            Enabled: dnsState.Enabled,
            Error:   err,
        }
        pbFullStatus.DnsServers = append(pbFullStatus.DnsServers, pbDnsState)
    }

    return &pbFullStatus
}

// sendTerminalNotification sends a terminal notification message
// to inform the user that the NetBird connection session has expired.
func sendTerminalNotification() error {

187
client/server/server_connect_test.go
Normal file
@@ -0,0 +1,187 @@
package server

import (
    "context"
    "sync"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

    "github.com/netbirdio/netbird/client/internal"
    "github.com/netbirdio/netbird/client/internal/peer"
    "github.com/netbirdio/netbird/client/proto"
)

func newTestServer() *Server {
    return &Server{
        rootCtx:        context.Background(),
        statusRecorder: peer.NewRecorder(""),
    }
}

func newDummyConnectClient(ctx context.Context) *internal.ConnectClient {
    return internal.NewConnectClient(ctx, nil, nil)
}

// TestConnectSetsClientWithMutex validates that connect() sets s.connectClient
// under mutex protection so concurrent readers see a consistent value.
func TestConnectSetsClientWithMutex(t *testing.T) {
    s := newTestServer()
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    // Manually simulate what connect() does (without calling Run, which panics without full setup)
    client := newDummyConnectClient(ctx)

    s.mutex.Lock()
    s.connectClient = client
    s.mutex.Unlock()

    // Verify the assignment is visible under mutex
    s.mutex.Lock()
    assert.Equal(t, client, s.connectClient, "connectClient should be set")
    s.mutex.Unlock()
}

// TestConcurrentConnectClientAccess validates that concurrent reads of
// s.connectClient under mutex don't race with a write.
func TestConcurrentConnectClientAccess(t *testing.T) {
    s := newTestServer()
    ctx := context.Background()
    client := newDummyConnectClient(ctx)

    var wg sync.WaitGroup
    nilCount := 0
    setCount := 0
    var mu sync.Mutex

    // Start readers
    for i := 0; i < 50; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            s.mutex.Lock()
            c := s.connectClient
            s.mutex.Unlock()

            mu.Lock()
            defer mu.Unlock()
            if c == nil {
                nilCount++
            } else {
                setCount++
            }
        }()
    }

    // Simulate connect() writing under mutex
    time.Sleep(5 * time.Millisecond)
    s.mutex.Lock()
    s.connectClient = client
    s.mutex.Unlock()

    wg.Wait()

    assert.Equal(t, 50, nilCount+setCount, "all goroutines should complete without panic")
}

// TestCleanupConnection_ClearsConnectClient validates that cleanupConnection
// properly nils out connectClient.
func TestCleanupConnection_ClearsConnectClient(t *testing.T) {
    s := newTestServer()
    _, cancel := context.WithCancel(context.Background())
    s.actCancel = cancel

    s.connectClient = newDummyConnectClient(context.Background())
    s.clientRunning = true

    err := s.cleanupConnection()
    require.NoError(t, err)

    assert.Nil(t, s.connectClient, "connectClient should be nil after cleanup")
}

// TestCleanState_NilConnectClient validates that CleanState doesn't panic
// when connectClient is nil.
func TestCleanState_NilConnectClient(t *testing.T) {
    s := newTestServer()
    s.connectClient = nil
    s.profileManager = nil // will cause error if it tries to proceed past the nil check

    // Should not panic — the nil check should prevent calling Status() on nil
    assert.NotPanics(t, func() {
        _, _ = s.CleanState(context.Background(), &proto.CleanStateRequest{All: true})
    })
}

// TestDeleteState_NilConnectClient validates that DeleteState doesn't panic
// when connectClient is nil.
func TestDeleteState_NilConnectClient(t *testing.T) {
    s := newTestServer()
    s.connectClient = nil
    s.profileManager = nil

    assert.NotPanics(t, func() {
        _, _ = s.DeleteState(context.Background(), &proto.DeleteStateRequest{All: true})
    })
}

// TestDownThenUp_StaleRunningChan documents the known state issue where
// clientRunningChan from a previous connection is already closed, causing
// waitForUp() to return immediately on reconnect.
func TestDownThenUp_StaleRunningChan(t *testing.T) {
    s := newTestServer()

    // Simulate state after a successful connection
    s.clientRunning = true
    s.clientRunningChan = make(chan struct{})
    close(s.clientRunningChan) // closed when engine started
    s.clientGiveUpChan = make(chan struct{})
    s.connectClient = newDummyConnectClient(context.Background())

    _, cancel := context.WithCancel(context.Background())
    s.actCancel = cancel

    // Simulate Down(): cleanupConnection sets connectClient = nil
    s.mutex.Lock()
    err := s.cleanupConnection()
    s.mutex.Unlock()
    require.NoError(t, err)

    // After cleanup: connectClient is nil, clientRunning still true
    // (goroutine hasn't exited yet)
    s.mutex.Lock()
    assert.Nil(t, s.connectClient, "connectClient should be nil after cleanup")
    assert.True(t, s.clientRunning, "clientRunning still true until goroutine exits")
    s.mutex.Unlock()

    // waitForUp() returns immediately due to the stale closed clientRunningChan
    ctx, ctxCancel := context.WithTimeout(context.Background(), 2*time.Second)
    defer ctxCancel()

    waitDone := make(chan error, 1)
    go func() {
        _, err := s.waitForUp(ctx)
        waitDone <- err
    }()

    select {
    case err := <-waitDone:
        assert.NoError(t, err, "waitForUp returns success on stale channel")
        // But connectClient is still nil — this is the stale state issue
        s.mutex.Lock()
        assert.Nil(t, s.connectClient, "connectClient is nil despite waitForUp success")
        s.mutex.Unlock()
    case <-time.After(1 * time.Second):
        t.Fatal("waitForUp should have returned immediately due to stale closed channel")
    }
}

// TestConnectClient_EngineNilOnFreshClient validates that a newly created
// ConnectClient has nil Engine (before Run is called).
func TestConnectClient_EngineNilOnFreshClient(t *testing.T) {
    client := newDummyConnectClient(context.Background())
    assert.Nil(t, client.Engine(), "engine should be nil on fresh ConnectClient")
}
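Since these tests exist to pin down locking behavior, they are most meaningful under the race detector, e.g.:

    go test -race ./client/server/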
@@ -20,6 +20,7 @@ import (
    "github.com/netbirdio/netbird/management/internals/modules/peers"
    "github.com/netbirdio/netbird/management/internals/modules/peers/ephemeral/manager"
    nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc"
    "github.com/netbirdio/netbird/management/server/job"

    "github.com/netbirdio/netbird/management/internals/server/config"
    "github.com/netbirdio/netbird/management/server/groups"
@@ -35,6 +36,7 @@ import (
    daemonProto "github.com/netbirdio/netbird/client/proto"
    "github.com/netbirdio/netbird/management/server"
    "github.com/netbirdio/netbird/management/server/activity"
    nbcache "github.com/netbirdio/netbird/management/server/cache"
    "github.com/netbirdio/netbird/management/server/integrations/port_forwarding"
    "github.com/netbirdio/netbird/management/server/permissions"
    "github.com/netbirdio/netbird/management/server/settings"
@@ -102,7 +104,7 @@ func TestConnectWithRetryRuns(t *testing.T) {
        t.Fatalf("failed to set active profile state: %v", err)
    }

    s := New(ctx, "debug", "", false, false)
    s := New(ctx, "debug", "", false, false, false, false)

    s.config = config

@@ -112,7 +114,7 @@ func TestConnectWithRetryRuns(t *testing.T) {
    t.Setenv(maxRetryTimeVar, "5s")
    t.Setenv(retryMultiplierVar, "1")

    s.connectWithRetryRuns(ctx, config, s.statusRecorder, false, nil, nil)
    s.connectWithRetryRuns(ctx, config, s.statusRecorder, nil, nil)
    if counter < 3 {
        t.Fatalf("expected counter > 2, got %d", counter)
    }
@@ -163,7 +165,7 @@ func TestServer_Up(t *testing.T) {
        t.Fatalf("failed to set active profile state: %v", err)
    }

    s := New(ctx, "console", "", false, false)
    s := New(ctx, "console", "", false, false, false, false)
    err = s.Start()
    require.NoError(t, err)

@@ -233,7 +235,7 @@ func TestServer_SubcribeEvents(t *testing.T) {
        t.Fatalf("failed to set active profile state: %v", err)
    }

    s := New(ctx, "console", "", false, false)
    s := New(ctx, "console", "", false, false, false, false)

    err = s.Start()
    require.NoError(t, err)
@@ -306,7 +308,14 @@ func startManagement(t *testing.T, signalAddr string, counter *int) (*grpc.Serve
    peersManager := peers.NewManager(store, permissionsManagerMock)
    settingsManagerMock := settings.NewMockManager(ctrl)

    ia, _ := integrations.NewIntegratedValidator(context.Background(), peersManager, settingsManagerMock, eventStore)
    jobManager := job.NewJobManager(nil, store, peersManager)

    cacheStore, err := nbcache.NewStore(context.Background(), 100*time.Millisecond, 300*time.Millisecond, 100)
    if err != nil {
        return nil, "", err
    }

    ia, _ := integrations.NewIntegratedValidator(context.Background(), peersManager, settingsManagerMock, eventStore, cacheStore)

    metrics, err := telemetry.NewDefaultAppMetrics(context.Background())
    require.NoError(t, err)
@@ -317,7 +326,7 @@ func startManagement(t *testing.T, signalAddr string, counter *int) (*grpc.Serve
    requestBuffer := server.NewAccountRequestBuffer(context.Background(), store)
    peersUpdateManager := update_channel.NewPeersUpdateManager(metrics)
    networkMapController := controller.NewController(context.Background(), store, metrics, peersUpdateManager, requestBuffer, server.MockIntegratedValidator{}, settingsMockManager, "netbird.selfhosted", port_forwarding.NewControllerMock(), manager.NewEphemeralManager(store, peersManager), config)
    accountManager, err := server.BuildManager(context.Background(), config, store, networkMapController, nil, "", eventStore, nil, false, ia, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManagerMock, false)
    accountManager, err := server.BuildManager(context.Background(), config, store, networkMapController, jobManager, nil, "", eventStore, nil, false, ia, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManagerMock, false, cacheStore)
    if err != nil {
        return nil, "", err
    }
@@ -326,7 +335,7 @@ func startManagement(t *testing.T, signalAddr string, counter *int) (*grpc.Serve
    if err != nil {
        return nil, "", err
    }
    mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, secretsManager, nil, nil, &server.MockIntegratedValidator{}, networkMapController)
    mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, &server.MockIntegratedValidator{}, networkMapController, nil, nil)
    if err != nil {
        return nil, "", err
    }

@@ -53,7 +53,7 @@ func TestSetConfig_AllFieldsSaved(t *testing.T) {
    require.NoError(t, err)

    ctx := context.Background()
    s := New(ctx, "console", "", false, false)
    s := New(ctx, "console", "", false, false, false, false)

    rosenpassEnabled := true
    rosenpassPermissive := true

93
client/server/sleep.go
Normal file
@@ -0,0 +1,93 @@
package server

import (
    "context"
    "os"
    "strconv"

    log "github.com/sirupsen/logrus"

    "github.com/netbirdio/netbird/client/internal"
    "github.com/netbirdio/netbird/client/internal/sleep"
    "github.com/netbirdio/netbird/client/proto"
)

const envDisableSleepDetector = "NB_DISABLE_SLEEP_DETECTOR"

// serverAgent adapts Server to the handler.Agent and handler.StatusChecker interfaces
type serverAgent struct {
    s *Server
}

func (a *serverAgent) Up(ctx context.Context) error {
    _, err := a.s.Up(ctx, &proto.UpRequest{})
    return err
}

func (a *serverAgent) Down(ctx context.Context) error {
    _, err := a.s.Down(ctx, &proto.DownRequest{})
    return err
}

func (a *serverAgent) Status() (internal.StatusType, error) {
    return internal.CtxGetState(a.s.rootCtx).Status()
}

// startSleepDetector starts the OS sleep/wake detector and forwards events to
// the sleep handler. On platforms without a supported detector the attempt
// logs a warning and returns. Setting NB_DISABLE_SLEEP_DETECTOR=true skips
// registration entirely.
func (s *Server) startSleepDetector() {
    if sleepDetectorDisabled() {
        log.Info("sleep detection disabled via " + envDisableSleepDetector)
        return
    }

    svc, err := sleep.New()
    if err != nil {
        log.Warnf("failed to initialize sleep detection: %v", err)
        return
    }

    err = svc.Register(func(event sleep.EventType) {
        switch event {
        case sleep.EventTypeSleep:
            log.Info("handling sleep event")
            if err := s.sleepHandler.HandleSleep(s.rootCtx); err != nil {
                log.Errorf("failed to handle sleep event: %v", err)
            }
        case sleep.EventTypeWakeUp:
            log.Info("handling wakeup event")
            if err := s.sleepHandler.HandleWakeUp(s.rootCtx); err != nil {
                log.Errorf("failed to handle wakeup event: %v", err)
            }
        }
    })
    if err != nil {
        log.Errorf("failed to register sleep detector: %v", err)
        return
    }

    log.Info("sleep detection service initialized")

    go func() {
        <-s.rootCtx.Done()
        log.Info("stopping sleep event listener")
        if err := svc.Deregister(); err != nil {
            log.Errorf("failed to deregister sleep detector: %v", err)
        }
    }()
}

func sleepDetectorDisabled() bool {
    val := os.Getenv(envDisableSleepDetector)
    if val == "" {
        return false
    }
    disabled, err := strconv.ParseBool(val)
    if err != nil {
        log.Warnf("failed to parse %s=%q: %v", envDisableSleepDetector, val, err)
        return false
    }
    return disabled
}
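Because the gate goes through strconv.ParseBool, NB_DISABLE_SLEEP_DETECTOR accepts more spellings than just "true"; anything unparsable logs a warning and leaves detection enabled:

    // Accepted by strconv.ParseBool, and therefore by this env var:
    //   disable detection: "1", "t", "T", "TRUE", "true", "True"
    //   keep detection on: "0", "f", "F", "FALSE", "false", "False"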
@@ -12,7 +12,6 @@ import (
    "github.com/netbirdio/netbird/client/internal"
    "github.com/netbirdio/netbird/client/internal/routemanager/systemops"
    "github.com/netbirdio/netbird/client/internal/statemanager"
    nbnet "github.com/netbirdio/netbird/client/net"
    "github.com/netbirdio/netbird/client/proto"
)

@@ -39,7 +38,7 @@ func (s *Server) ListStates(_ context.Context, _ *proto.ListStatesRequest) (*pro

// CleanState handles cleaning of states (performing cleanup operations)
func (s *Server) CleanState(ctx context.Context, req *proto.CleanStateRequest) (*proto.CleanStateResponse, error) {
    if s.connectClient.Status() == internal.StatusConnected || s.connectClient.Status() == internal.StatusConnecting {
    if s.connectClient != nil && (s.connectClient.Status() == internal.StatusConnected || s.connectClient.Status() == internal.StatusConnecting) {
        return nil, status.Errorf(codes.FailedPrecondition, "cannot clean state while connecting or connected, run 'netbird down' first.")
    }

@@ -82,7 +81,7 @@ func (s *Server) CleanState(ctx context.Context, req *proto.CleanStateRequest) (

// DeleteState handles deletion of states without cleanup
func (s *Server) DeleteState(ctx context.Context, req *proto.DeleteStateRequest) (*proto.DeleteStateResponse, error) {
    if s.connectClient.Status() == internal.StatusConnected || s.connectClient.Status() == internal.StatusConnecting {
    if s.connectClient != nil && (s.connectClient.Status() == internal.StatusConnected || s.connectClient.Status() == internal.StatusConnecting) {
        return nil, status.Errorf(codes.FailedPrecondition, "cannot clean state while connecting or connected, run 'netbird down' first.")
    }

@@ -138,10 +137,8 @@ func restoreResidualState(ctx context.Context, statePath string) error {
    }

    // clean up any remaining routes independently of the state file
    if !nbnet.AdvancedRouting() {
        if err := systemops.New(nil, nil).FlushMarkedRoutes(); err != nil {
            merr = multierror.Append(merr, fmt.Errorf("flush marked routes: %w", err))
        }
    if err := systemops.New(nil, nil).FlushMarkedRoutes(); err != nil {
        merr = multierror.Append(merr, fmt.Errorf("flush marked routes: %w", err))
    }

    return nberrors.FormatErrorOrNil(merr)

@@ -9,6 +9,7 @@ import (
    "github.com/netbirdio/netbird/client/ssh/config"
)

// registerStates registers all states that need crash recovery cleanup.
func registerStates(mgr *statemanager.Manager) {
    mgr.RegisterState(&dns.ShutdownState{})
    mgr.RegisterState(&systemops.ShutdownState{})

@@ -11,6 +11,7 @@ import (
    "github.com/netbirdio/netbird/client/ssh/config"
)

// registerStates registers all states that need crash recovery cleanup.
func registerStates(mgr *statemanager.Manager) {
    mgr.RegisterState(&dns.ShutdownState{})
    mgr.RegisterState(&systemops.ShutdownState{})

24
client/server/triggerupdate.go
Normal file
@@ -0,0 +1,24 @@
package server

import (
    "context"

    log "github.com/sirupsen/logrus"

    "github.com/netbirdio/netbird/client/proto"
)

// TriggerUpdate initiates installation of the pending enforced version.
// It is called when the user clicks the install button in the UI (Mode 2 / enforced update).
func (s *Server) TriggerUpdate(ctx context.Context, _ *proto.TriggerUpdateRequest) (*proto.TriggerUpdateResponse, error) {
    if s.updateManager == nil {
        return &proto.TriggerUpdateResponse{Success: false, ErrorMsg: "update manager not available"}, nil
    }

    if err := s.updateManager.Install(ctx); err != nil {
        log.Warnf("TriggerUpdate failed: %v", err)
        return &proto.TriggerUpdateResponse{Success: false, ErrorMsg: err.Error()}, nil
    }

    return &proto.TriggerUpdateResponse{Success: true}, nil
}

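TriggerUpdate reports failures inside the response payload (Success/ErrorMsg) rather than as a gRPC error, which lets the UI render the message without unwrapping status codes. A hypothetical caller on the UI side, assuming conn is the established daemon connection and the standard generated client constructor:

    resp, err := proto.NewDaemonServiceClient(conn).TriggerUpdate(ctx, &proto.TriggerUpdateRequest{})
    if err != nil {
        return err // transport-level failure only
    }
    if !resp.GetSuccess() {
        log.Warnf("update not installed: %s", resp.GetErrorMsg())
    }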
@@ -5,7 +5,7 @@ import (

    log "github.com/sirupsen/logrus"

    "github.com/netbirdio/netbird/client/internal/updatemanager/installer"
    "github.com/netbirdio/netbird/client/internal/updater/installer"
    "github.com/netbirdio/netbird/client/proto"
)
