diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 9ce779dbb..19a3a01e0 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -19,7 +19,7 @@ jobs: - name: codespell uses: codespell-project/actions-codespell@v2 with: - ignore_words_list: erro,clienta,hastable,iif,groupd,testin,groupe,cros + ignore_words_list: erro,clienta,hastable,iif,groupd,testin,groupe,cros,ans skip: go.mod,go.sum golangci: strategy: diff --git a/client/Dockerfile b/client/Dockerfile index 5cd459357..2ff0cca19 100644 --- a/client/Dockerfile +++ b/client/Dockerfile @@ -4,7 +4,7 @@ # sudo podman build -t localhost/netbird:latest -f client/Dockerfile --ignorefile .dockerignore-client . # sudo podman run --rm -it --cap-add={BPF,NET_ADMIN,NET_RAW} localhost/netbird:latest -FROM alpine:3.22.2 +FROM alpine:3.23.2 # iproute2: busybox doesn't display ip rules properly RUN apk add --no-cache \ bash \ diff --git a/client/android/login.go b/client/android/login.go index 4d4c7a650..a9422cdbf 100644 --- a/client/android/login.go +++ b/client/android/login.go @@ -3,15 +3,7 @@ package android import ( "context" "fmt" - "time" - "github.com/cenkalti/backoff/v4" - log "github.com/sirupsen/logrus" - "google.golang.org/grpc/codes" - gstatus "google.golang.org/grpc/status" - - "github.com/netbirdio/netbird/client/cmd" - "github.com/netbirdio/netbird/client/internal" "github.com/netbirdio/netbird/client/internal/auth" "github.com/netbirdio/netbird/client/internal/profilemanager" "github.com/netbirdio/netbird/client/system" @@ -84,34 +76,21 @@ func (a *Auth) SaveConfigIfSSOSupported(listener SSOListener) { } func (a *Auth) saveConfigIfSSOSupported() (bool, error) { - supportsSSO := true - err := a.withBackOff(a.ctx, func() (err error) { - _, err = internal.GetPKCEAuthorizationFlowInfo(a.ctx, a.config.PrivateKey, a.config.ManagementURL, nil) - if s, ok := gstatus.FromError(err); ok && (s.Code() == codes.NotFound || s.Code() == codes.Unimplemented) { - _, err = internal.GetDeviceAuthorizationFlowInfo(a.ctx, a.config.PrivateKey, a.config.ManagementURL) - s, ok := gstatus.FromError(err) - if !ok { - return err - } - if s.Code() == codes.NotFound || s.Code() == codes.Unimplemented { - supportsSSO = false - err = nil - } + authClient, err := auth.NewAuth(a.ctx, a.config.PrivateKey, a.config.ManagementURL, a.config) + if err != nil { + return false, fmt.Errorf("failed to create auth client: %v", err) + } + defer authClient.Close() - return err - } - - return err - }) + supportsSSO, err := authClient.IsSSOSupported(a.ctx) + if err != nil { + return false, fmt.Errorf("failed to check SSO support: %v", err) + } if !supportsSSO { return false, nil } - if err != nil { - return false, fmt.Errorf("backoff cycle failed: %v", err) - } - err = profilemanager.WriteOutConfig(a.cfgPath, a.config) return true, err } @@ -129,19 +108,17 @@ func (a *Auth) LoginWithSetupKeyAndSaveConfig(resultListener ErrListener, setupK } func (a *Auth) loginWithSetupKeyAndSaveConfig(setupKey string, deviceName string) error { + authClient, err := auth.NewAuth(a.ctx, a.config.PrivateKey, a.config.ManagementURL, a.config) + if err != nil { + return fmt.Errorf("failed to create auth client: %v", err) + } + defer authClient.Close() + //nolint ctxWithValues := context.WithValue(a.ctx, system.DeviceNameCtxKey, deviceName) - - err := a.withBackOff(a.ctx, func() error { - backoffErr := internal.Login(ctxWithValues, a.config, setupKey, "") - if s, ok := gstatus.FromError(backoffErr); ok && (s.Code() == codes.PermissionDenied) { - // we got an answer from management, exit backoff earlier - return backoff.Permanent(backoffErr) - } - return backoffErr - }) + err, _ = authClient.Login(ctxWithValues, setupKey, "") if err != nil { - return fmt.Errorf("backoff cycle failed: %v", err) + return fmt.Errorf("login failed: %v", err) } return profilemanager.WriteOutConfig(a.cfgPath, a.config) @@ -160,49 +137,41 @@ func (a *Auth) Login(resultListener ErrListener, urlOpener URLOpener, isAndroidT } func (a *Auth) login(urlOpener URLOpener, isAndroidTV bool) error { - var needsLogin bool + authClient, err := auth.NewAuth(a.ctx, a.config.PrivateKey, a.config.ManagementURL, a.config) + if err != nil { + return fmt.Errorf("failed to create auth client: %v", err) + } + defer authClient.Close() // check if we need to generate JWT token - err := a.withBackOff(a.ctx, func() (err error) { - needsLogin, err = internal.IsLoginRequired(a.ctx, a.config) - return - }) + needsLogin, err := authClient.IsLoginRequired(a.ctx) if err != nil { - return fmt.Errorf("backoff cycle failed: %v", err) + return fmt.Errorf("failed to check login requirement: %v", err) } jwtToken := "" if needsLogin { - tokenInfo, err := a.foregroundGetTokenInfo(urlOpener, isAndroidTV) + tokenInfo, err := a.foregroundGetTokenInfo(authClient, urlOpener, isAndroidTV) if err != nil { return fmt.Errorf("interactive sso login failed: %v", err) } jwtToken = tokenInfo.GetTokenToUse() } - err = a.withBackOff(a.ctx, func() error { - err := internal.Login(a.ctx, a.config, "", jwtToken) - - if err == nil { - go urlOpener.OnLoginSuccess() - } - - if s, ok := gstatus.FromError(err); ok && (s.Code() == codes.InvalidArgument || s.Code() == codes.PermissionDenied) { - return nil - } - return err - }) + err, _ = authClient.Login(a.ctx, "", jwtToken) if err != nil { - return fmt.Errorf("backoff cycle failed: %v", err) + return fmt.Errorf("login failed: %v", err) } + go urlOpener.OnLoginSuccess() + return nil } -func (a *Auth) foregroundGetTokenInfo(urlOpener URLOpener, isAndroidTV bool) (*auth.TokenInfo, error) { - oAuthFlow, err := auth.NewOAuthFlow(a.ctx, a.config, false, isAndroidTV, "") +func (a *Auth) foregroundGetTokenInfo(authClient *auth.Auth, urlOpener URLOpener, isAndroidTV bool) (*auth.TokenInfo, error) { + oAuthFlow, err := authClient.GetOAuthFlow(a.ctx, isAndroidTV) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to get OAuth flow: %v", err) } flowInfo, err := oAuthFlow.RequestAuthInfo(context.TODO()) @@ -212,22 +181,10 @@ func (a *Auth) foregroundGetTokenInfo(urlOpener URLOpener, isAndroidTV bool) (*a go urlOpener.Open(flowInfo.VerificationURIComplete, flowInfo.UserCode) - waitTimeout := time.Duration(flowInfo.ExpiresIn) * time.Second - waitCTX, cancel := context.WithTimeout(a.ctx, waitTimeout) - defer cancel() - tokenInfo, err := oAuthFlow.WaitToken(waitCTX, flowInfo) + tokenInfo, err := oAuthFlow.WaitToken(a.ctx, flowInfo) if err != nil { return nil, fmt.Errorf("waiting for browser login failed: %v", err) } return &tokenInfo, nil } - -func (a *Auth) withBackOff(ctx context.Context, bf func() error) error { - return backoff.RetryNotify( - bf, - backoff.WithContext(cmd.CLIBackOffSettings, ctx), - func(err error, duration time.Duration) { - log.Warnf("retrying Login to the Management service in %v due to error %v", duration, err) - }) -} diff --git a/client/cmd/debug.go b/client/cmd/debug.go index e56f66103..e480df4d7 100644 --- a/client/cmd/debug.go +++ b/client/cmd/debug.go @@ -16,7 +16,6 @@ import ( "github.com/netbirdio/netbird/client/internal/profilemanager" "github.com/netbirdio/netbird/client/proto" "github.com/netbirdio/netbird/client/server" - nbstatus "github.com/netbirdio/netbird/client/status" mgmProto "github.com/netbirdio/netbird/shared/management/proto" "github.com/netbirdio/netbird/upload-server/types" ) @@ -98,7 +97,6 @@ func debugBundle(cmd *cobra.Command, _ []string) error { client := proto.NewDaemonServiceClient(conn) request := &proto.DebugBundleRequest{ Anonymize: anonymizeFlag, - Status: getStatusOutput(cmd, anonymizeFlag), SystemInfo: systemInfoFlag, LogFileCount: logFileCount, } @@ -221,21 +219,37 @@ func runForDuration(cmd *cobra.Command, args []string) error { time.Sleep(3 * time.Second) - headerPostUp := fmt.Sprintf("----- NetBird post-up - Timestamp: %s", time.Now().Format(time.RFC3339)) - statusOutput := fmt.Sprintf("%s\n%s", headerPostUp, getStatusOutput(cmd, anonymizeFlag)) + cpuProfilingStarted := false + if _, err := client.StartCPUProfile(cmd.Context(), &proto.StartCPUProfileRequest{}); err != nil { + cmd.PrintErrf("Failed to start CPU profiling: %v\n", err) + } else { + cpuProfilingStarted = true + defer func() { + if cpuProfilingStarted { + if _, err := client.StopCPUProfile(cmd.Context(), &proto.StopCPUProfileRequest{}); err != nil { + cmd.PrintErrf("Failed to stop CPU profiling: %v\n", err) + } + } + }() + } if waitErr := waitForDurationOrCancel(cmd.Context(), duration, cmd); waitErr != nil { return waitErr } cmd.Println("\nDuration completed") + if cpuProfilingStarted { + if _, err := client.StopCPUProfile(cmd.Context(), &proto.StopCPUProfileRequest{}); err != nil { + cmd.PrintErrf("Failed to stop CPU profiling: %v\n", err) + } else { + cpuProfilingStarted = false + } + } + cmd.Println("Creating debug bundle...") - headerPreDown := fmt.Sprintf("----- NetBird pre-down - Timestamp: %s - Duration: %s", time.Now().Format(time.RFC3339), duration) - statusOutput = fmt.Sprintf("%s\n%s\n%s", statusOutput, headerPreDown, getStatusOutput(cmd, anonymizeFlag)) request := &proto.DebugBundleRequest{ Anonymize: anonymizeFlag, - Status: statusOutput, SystemInfo: systemInfoFlag, LogFileCount: logFileCount, } @@ -302,24 +316,6 @@ func setSyncResponsePersistence(cmd *cobra.Command, args []string) error { return nil } -func getStatusOutput(cmd *cobra.Command, anon bool) string { - var statusOutputString string - statusResp, err := getStatus(cmd.Context(), true) - if err != nil { - cmd.PrintErrf("Failed to get status: %v\n", err) - } else { - pm := profilemanager.NewProfileManager() - var profName string - if activeProf, err := pm.GetActiveProfile(); err == nil { - profName = activeProf.Name - } - - overview := nbstatus.ConvertToStatusOutputOverview(statusResp, anon, "", nil, nil, nil, "", profName) - statusOutputString = overview.FullDetailSummary() - } - return statusOutputString -} - func waitForDurationOrCancel(ctx context.Context, duration time.Duration, cmd *cobra.Command) error { ticker := time.NewTicker(1 * time.Second) defer ticker.Stop() @@ -378,7 +374,8 @@ func generateDebugBundle(config *profilemanager.Config, recorder *peer.Status, c InternalConfig: config, StatusRecorder: recorder, SyncResponse: syncResponse, - LogFile: logFilePath, + LogPath: logFilePath, + CPUProfile: nil, }, debug.BundleConfig{ IncludeSystemInfo: true, diff --git a/client/cmd/login.go b/client/cmd/login.go index 57c010571..64b45e557 100644 --- a/client/cmd/login.go +++ b/client/cmd/login.go @@ -7,7 +7,6 @@ import ( "os/user" "runtime" "strings" - "time" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -277,18 +276,19 @@ func handleSSOLogin(ctx context.Context, cmd *cobra.Command, loginResp *proto.Lo } func foregroundLogin(ctx context.Context, cmd *cobra.Command, config *profilemanager.Config, setupKey, profileName string) error { + authClient, err := auth.NewAuth(ctx, config.PrivateKey, config.ManagementURL, config) + if err != nil { + return fmt.Errorf("failed to create auth client: %v", err) + } + defer authClient.Close() + needsLogin := false - err := WithBackOff(func() error { - err := internal.Login(ctx, config, "", "") - if s, ok := gstatus.FromError(err); ok && (s.Code() == codes.InvalidArgument || s.Code() == codes.PermissionDenied) { - needsLogin = true - return nil - } - return err - }) - if err != nil { - return fmt.Errorf("backoff cycle failed: %v", err) + err, isAuthError := authClient.Login(ctx, "", "") + if isAuthError { + needsLogin = true + } else if err != nil { + return fmt.Errorf("login check failed: %v", err) } jwtToken := "" @@ -300,23 +300,9 @@ func foregroundLogin(ctx context.Context, cmd *cobra.Command, config *profileman jwtToken = tokenInfo.GetTokenToUse() } - var lastError error - - err = WithBackOff(func() error { - err := internal.Login(ctx, config, setupKey, jwtToken) - if s, ok := gstatus.FromError(err); ok && (s.Code() == codes.InvalidArgument || s.Code() == codes.PermissionDenied) { - lastError = err - return nil - } - return err - }) - - if lastError != nil { - return fmt.Errorf("login failed: %v", lastError) - } - + err, _ = authClient.Login(ctx, setupKey, jwtToken) if err != nil { - return fmt.Errorf("backoff cycle failed: %v", err) + return fmt.Errorf("login failed: %v", err) } return nil @@ -344,11 +330,7 @@ func foregroundGetTokenInfo(ctx context.Context, cmd *cobra.Command, config *pro openURL(cmd, flowInfo.VerificationURIComplete, flowInfo.UserCode, noBrowser) - waitTimeout := time.Duration(flowInfo.ExpiresIn) * time.Second - waitCTX, c := context.WithTimeout(context.TODO(), waitTimeout) - defer c() - - tokenInfo, err := oAuthFlow.WaitToken(waitCTX, flowInfo) + tokenInfo, err := oAuthFlow.WaitToken(context.TODO(), flowInfo) if err != nil { return nil, fmt.Errorf("waiting for browser login failed: %v", err) } diff --git a/client/cmd/status.go b/client/cmd/status.go index 05175663c..f09c35c2c 100644 --- a/client/cmd/status.go +++ b/client/cmd/status.go @@ -99,7 +99,7 @@ func statusFunc(cmd *cobra.Command, args []string) error { profName = activeProf.Name } - var outputInformationHolder = nbstatus.ConvertToStatusOutputOverview(resp, anonymizeFlag, statusFilter, prefixNamesFilter, prefixNamesFilterMap, ipsFilterMap, connectionTypeFilter, profName) + var outputInformationHolder = nbstatus.ConvertToStatusOutputOverview(resp.GetFullStatus(), anonymizeFlag, resp.GetDaemonVersion(), statusFilter, prefixNamesFilter, prefixNamesFilterMap, ipsFilterMap, connectionTypeFilter, profName) var statusOutputString string switch { case detailFlag: diff --git a/client/cmd/testutil_test.go b/client/cmd/testutil_test.go index 2650d6225..4bda33e65 100644 --- a/client/cmd/testutil_test.go +++ b/client/cmd/testutil_test.go @@ -18,6 +18,7 @@ import ( "github.com/netbirdio/netbird/management/internals/modules/peers" "github.com/netbirdio/netbird/management/internals/modules/peers/ephemeral/manager" nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" + "github.com/netbirdio/netbird/management/server/job" clientProto "github.com/netbirdio/netbird/client/proto" client "github.com/netbirdio/netbird/client/server" @@ -97,6 +98,8 @@ func startManagement(t *testing.T, config *config.Config, testFile string) (*grp peersmanager := peers.NewManager(store, permissionsManagerMock) settingsManagerMock := settings.NewMockManager(ctrl) + jobManager := job.NewJobManager(nil, store, peersmanager) + iv, _ := integrations.NewIntegratedValidator(context.Background(), peersmanager, settingsManagerMock, eventStore) metrics, err := telemetry.NewDefaultAppMetrics(context.Background()) @@ -115,7 +118,7 @@ func startManagement(t *testing.T, config *config.Config, testFile string) (*grp requestBuffer := mgmt.NewAccountRequestBuffer(ctx, store) networkMapController := controller.NewController(ctx, store, metrics, updateManager, requestBuffer, mgmt.MockIntegratedValidator{}, settingsMockManager, "netbird.cloud", port_forwarding.NewControllerMock(), manager.NewEphemeralManager(store, peersmanager), config) - accountManager, err := mgmt.BuildManager(context.Background(), config, store, networkMapController, nil, "", eventStore, nil, false, iv, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManagerMock, false) + accountManager, err := mgmt.BuildManager(context.Background(), config, store, networkMapController, jobManager, nil, "", eventStore, nil, false, iv, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManagerMock, false) if err != nil { t.Fatal(err) } @@ -124,7 +127,7 @@ func startManagement(t *testing.T, config *config.Config, testFile string) (*grp if err != nil { t.Fatal(err) } - mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, secretsManager, nil, nil, &mgmt.MockIntegratedValidator{}, networkMapController, nil) + mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, &mgmt.MockIntegratedValidator{}, networkMapController, nil) if err != nil { t.Fatal(err) } diff --git a/client/cmd/up.go b/client/cmd/up.go index 057d35268..9559287d5 100644 --- a/client/cmd/up.go +++ b/client/cmd/up.go @@ -200,7 +200,7 @@ func runInForegroundMode(ctx context.Context, cmd *cobra.Command, activeProf *pr connectClient := internal.NewConnectClient(ctx, config, r, false) SetupDebugHandler(ctx, config, r, connectClient, "") - return connectClient.Run(nil) + return connectClient.Run(nil, util.FindFirstLogPath(logFiles)) } func runInDaemonMode(ctx context.Context, cmd *cobra.Command, pm *profilemanager.ProfileManager, activeProf *profilemanager.Profile, profileSwitched bool) error { diff --git a/client/embed/embed.go b/client/embed/embed.go index 43089fc9d..e266aae28 100644 --- a/client/embed/embed.go +++ b/client/embed/embed.go @@ -16,6 +16,7 @@ import ( "github.com/netbirdio/netbird/client/iface/netstack" "github.com/netbirdio/netbird/client/internal" + "github.com/netbirdio/netbird/client/internal/auth" "github.com/netbirdio/netbird/client/internal/peer" "github.com/netbirdio/netbird/client/internal/profilemanager" sshcommon "github.com/netbirdio/netbird/client/ssh" @@ -176,7 +177,13 @@ func (c *Client) Start(startCtx context.Context) error { // nolint:staticcheck ctx = context.WithValue(ctx, system.DeviceNameCtxKey, c.deviceName) - if err := internal.Login(ctx, c.config, c.setupKey, c.jwtToken); err != nil { + authClient, err := auth.NewAuth(ctx, c.config.PrivateKey, c.config.ManagementURL, c.config) + if err != nil { + return fmt.Errorf("create auth client: %w", err) + } + defer authClient.Close() + + if err, _ := authClient.Login(ctx, c.setupKey, c.jwtToken); err != nil { return fmt.Errorf("login: %w", err) } @@ -190,7 +197,7 @@ func (c *Client) Start(startCtx context.Context) error { run := make(chan struct{}) clientErr := make(chan error, 1) go func() { - if err := client.Run(run); err != nil { + if err := client.Run(run, ""); err != nil { clientErr <- err } }() diff --git a/client/firewall/iptables/manager_linux.go b/client/firewall/iptables/manager_linux.go index 2563a9052..716385705 100644 --- a/client/firewall/iptables/manager_linux.go +++ b/client/firewall/iptables/manager_linux.go @@ -83,6 +83,10 @@ func (m *Manager) Init(stateManager *statemanager.Manager) error { return fmt.Errorf("acl manager init: %w", err) } + if err := m.initNoTrackChain(); err != nil { + return fmt.Errorf("init notrack chain: %w", err) + } + // persist early to ensure cleanup of chains go func() { if err := stateManager.PersistState(context.Background()); err != nil { @@ -177,6 +181,10 @@ func (m *Manager) Close(stateManager *statemanager.Manager) error { var merr *multierror.Error + if err := m.cleanupNoTrackChain(); err != nil { + merr = multierror.Append(merr, fmt.Errorf("cleanup notrack chain: %w", err)) + } + if err := m.aclMgr.Reset(); err != nil { merr = multierror.Append(merr, fmt.Errorf("reset acl manager: %w", err)) } @@ -277,6 +285,125 @@ func (m *Manager) RemoveInboundDNAT(localAddr netip.Addr, protocol firewall.Prot return m.router.RemoveInboundDNAT(localAddr, protocol, sourcePort, targetPort) } +const ( + chainNameRaw = "NETBIRD-RAW" + chainOUTPUT = "OUTPUT" + tableRaw = "raw" +) + +// SetupEBPFProxyNoTrack creates notrack rules for eBPF proxy loopback traffic. +// This prevents conntrack from tracking WireGuard proxy traffic on loopback, which +// can interfere with MASQUERADE rules (e.g., from container runtimes like Podman/netavark). +// +// Traffic flows that need NOTRACK: +// +// 1. Egress: WireGuard -> fake endpoint (before eBPF rewrite) +// src=127.0.0.1:wgPort -> dst=127.0.0.1:fakePort +// Matched by: sport=wgPort +// +// 2. Egress: Proxy -> WireGuard (via raw socket) +// src=127.0.0.1:fakePort -> dst=127.0.0.1:wgPort +// Matched by: dport=wgPort +// +// 3. Ingress: Packets to WireGuard +// dst=127.0.0.1:wgPort +// Matched by: dport=wgPort +// +// 4. Ingress: Packets to proxy (after eBPF rewrite) +// dst=127.0.0.1:proxyPort +// Matched by: dport=proxyPort +// +// Rules are cleaned up when the firewall manager is closed. +func (m *Manager) SetupEBPFProxyNoTrack(proxyPort, wgPort uint16) error { + m.mutex.Lock() + defer m.mutex.Unlock() + + wgPortStr := fmt.Sprintf("%d", wgPort) + proxyPortStr := fmt.Sprintf("%d", proxyPort) + + // Egress rules: match outgoing loopback UDP packets + outputRuleSport := []string{"-o", "lo", "-s", "127.0.0.1", "-d", "127.0.0.1", "-p", "udp", "--sport", wgPortStr, "-j", "NOTRACK"} + if err := m.ipv4Client.AppendUnique(tableRaw, chainNameRaw, outputRuleSport...); err != nil { + return fmt.Errorf("add output sport notrack rule: %w", err) + } + + outputRuleDport := []string{"-o", "lo", "-s", "127.0.0.1", "-d", "127.0.0.1", "-p", "udp", "--dport", wgPortStr, "-j", "NOTRACK"} + if err := m.ipv4Client.AppendUnique(tableRaw, chainNameRaw, outputRuleDport...); err != nil { + return fmt.Errorf("add output dport notrack rule: %w", err) + } + + // Ingress rules: match incoming loopback UDP packets + preroutingRuleWg := []string{"-i", "lo", "-s", "127.0.0.1", "-d", "127.0.0.1", "-p", "udp", "--dport", wgPortStr, "-j", "NOTRACK"} + if err := m.ipv4Client.AppendUnique(tableRaw, chainNameRaw, preroutingRuleWg...); err != nil { + return fmt.Errorf("add prerouting wg notrack rule: %w", err) + } + + preroutingRuleProxy := []string{"-i", "lo", "-s", "127.0.0.1", "-d", "127.0.0.1", "-p", "udp", "--dport", proxyPortStr, "-j", "NOTRACK"} + if err := m.ipv4Client.AppendUnique(tableRaw, chainNameRaw, preroutingRuleProxy...); err != nil { + return fmt.Errorf("add prerouting proxy notrack rule: %w", err) + } + + log.Debugf("set up ebpf proxy notrack rules for ports %d,%d", proxyPort, wgPort) + return nil +} + +func (m *Manager) initNoTrackChain() error { + if err := m.cleanupNoTrackChain(); err != nil { + log.Debugf("cleanup notrack chain: %v", err) + } + + if err := m.ipv4Client.NewChain(tableRaw, chainNameRaw); err != nil { + return fmt.Errorf("create chain: %w", err) + } + + jumpRule := []string{"-j", chainNameRaw} + + if err := m.ipv4Client.InsertUnique(tableRaw, chainOUTPUT, 1, jumpRule...); err != nil { + if delErr := m.ipv4Client.DeleteChain(tableRaw, chainNameRaw); delErr != nil { + log.Debugf("delete orphan chain: %v", delErr) + } + return fmt.Errorf("add output jump rule: %w", err) + } + + if err := m.ipv4Client.InsertUnique(tableRaw, chainPREROUTING, 1, jumpRule...); err != nil { + if delErr := m.ipv4Client.DeleteIfExists(tableRaw, chainOUTPUT, jumpRule...); delErr != nil { + log.Debugf("delete output jump rule: %v", delErr) + } + if delErr := m.ipv4Client.DeleteChain(tableRaw, chainNameRaw); delErr != nil { + log.Debugf("delete orphan chain: %v", delErr) + } + return fmt.Errorf("add prerouting jump rule: %w", err) + } + + return nil +} + +func (m *Manager) cleanupNoTrackChain() error { + exists, err := m.ipv4Client.ChainExists(tableRaw, chainNameRaw) + if err != nil { + return fmt.Errorf("check chain exists: %w", err) + } + if !exists { + return nil + } + + jumpRule := []string{"-j", chainNameRaw} + + if err := m.ipv4Client.DeleteIfExists(tableRaw, chainOUTPUT, jumpRule...); err != nil { + return fmt.Errorf("remove output jump rule: %w", err) + } + + if err := m.ipv4Client.DeleteIfExists(tableRaw, chainPREROUTING, jumpRule...); err != nil { + return fmt.Errorf("remove prerouting jump rule: %w", err) + } + + if err := m.ipv4Client.ClearAndDeleteChain(tableRaw, chainNameRaw); err != nil { + return fmt.Errorf("clear and delete chain: %w", err) + } + + return nil +} + func getConntrackEstablished() []string { return []string{"-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED", "-j", "ACCEPT"} } diff --git a/client/firewall/manager/firewall.go b/client/firewall/manager/firewall.go index 72e6a5c68..3511a5463 100644 --- a/client/firewall/manager/firewall.go +++ b/client/firewall/manager/firewall.go @@ -168,6 +168,10 @@ type Manager interface { // RemoveInboundDNAT removes inbound DNAT rule RemoveInboundDNAT(localAddr netip.Addr, protocol Protocol, sourcePort, targetPort uint16) error + + // SetupEBPFProxyNoTrack creates static notrack rules for eBPF proxy loopback traffic. + // This prevents conntrack from interfering with WireGuard proxy communication. + SetupEBPFProxyNoTrack(proxyPort, wgPort uint16) error } func GenKey(format string, pair RouterPair) string { diff --git a/client/firewall/nftables/manager_linux.go b/client/firewall/nftables/manager_linux.go index bd19f1067..acf482f86 100644 --- a/client/firewall/nftables/manager_linux.go +++ b/client/firewall/nftables/manager_linux.go @@ -12,6 +12,7 @@ import ( "github.com/google/nftables/binaryutil" "github.com/google/nftables/expr" log "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" firewall "github.com/netbirdio/netbird/client/firewall/manager" "github.com/netbirdio/netbird/client/iface/wgaddr" @@ -48,8 +49,10 @@ type Manager struct { rConn *nftables.Conn wgIface iFaceMapper - router *router - aclManager *AclManager + router *router + aclManager *AclManager + notrackOutputChain *nftables.Chain + notrackPreroutingChain *nftables.Chain } // Create nftables firewall manager @@ -91,6 +94,10 @@ func (m *Manager) Init(stateManager *statemanager.Manager) error { return fmt.Errorf("acl manager init: %w", err) } + if err := m.initNoTrackChains(workTable); err != nil { + return fmt.Errorf("init notrack chains: %w", err) + } + stateManager.RegisterState(&ShutdownState{}) // We only need to record minimal interface state for potential recreation. @@ -288,7 +295,15 @@ func (m *Manager) Flush() error { m.mutex.Lock() defer m.mutex.Unlock() - return m.aclManager.Flush() + if err := m.aclManager.Flush(); err != nil { + return err + } + + if err := m.refreshNoTrackChains(); err != nil { + log.Errorf("failed to refresh notrack chains: %v", err) + } + + return nil } // AddDNATRule adds a DNAT rule @@ -331,6 +346,176 @@ func (m *Manager) RemoveInboundDNAT(localAddr netip.Addr, protocol firewall.Prot return m.router.RemoveInboundDNAT(localAddr, protocol, sourcePort, targetPort) } +const ( + chainNameRawOutput = "netbird-raw-out" + chainNameRawPrerouting = "netbird-raw-pre" +) + +// SetupEBPFProxyNoTrack creates notrack rules for eBPF proxy loopback traffic. +// This prevents conntrack from tracking WireGuard proxy traffic on loopback, which +// can interfere with MASQUERADE rules (e.g., from container runtimes like Podman/netavark). +// +// Traffic flows that need NOTRACK: +// +// 1. Egress: WireGuard -> fake endpoint (before eBPF rewrite) +// src=127.0.0.1:wgPort -> dst=127.0.0.1:fakePort +// Matched by: sport=wgPort +// +// 2. Egress: Proxy -> WireGuard (via raw socket) +// src=127.0.0.1:fakePort -> dst=127.0.0.1:wgPort +// Matched by: dport=wgPort +// +// 3. Ingress: Packets to WireGuard +// dst=127.0.0.1:wgPort +// Matched by: dport=wgPort +// +// 4. Ingress: Packets to proxy (after eBPF rewrite) +// dst=127.0.0.1:proxyPort +// Matched by: dport=proxyPort +// +// Rules are cleaned up when the firewall manager is closed. +func (m *Manager) SetupEBPFProxyNoTrack(proxyPort, wgPort uint16) error { + m.mutex.Lock() + defer m.mutex.Unlock() + + if m.notrackOutputChain == nil || m.notrackPreroutingChain == nil { + return fmt.Errorf("notrack chains not initialized") + } + + proxyPortBytes := binaryutil.BigEndian.PutUint16(proxyPort) + wgPortBytes := binaryutil.BigEndian.PutUint16(wgPort) + loopback := []byte{127, 0, 0, 1} + + // Egress rules: match outgoing loopback UDP packets + m.rConn.AddRule(&nftables.Rule{ + Table: m.notrackOutputChain.Table, + Chain: m.notrackOutputChain, + Exprs: []expr.Any{ + &expr.Meta{Key: expr.MetaKeyOIFNAME, Register: 1}, + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: ifname("lo")}, + &expr.Payload{DestRegister: 1, Base: expr.PayloadBaseNetworkHeader, Offset: 12, Len: 4}, // saddr + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: loopback}, + &expr.Payload{DestRegister: 1, Base: expr.PayloadBaseNetworkHeader, Offset: 16, Len: 4}, // daddr + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: loopback}, + &expr.Meta{Key: expr.MetaKeyL4PROTO, Register: 1}, + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: []byte{unix.IPPROTO_UDP}}, + &expr.Payload{DestRegister: 1, Base: expr.PayloadBaseTransportHeader, Offset: 0, Len: 2}, + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: wgPortBytes}, // sport=wgPort + &expr.Counter{}, + &expr.Notrack{}, + }, + }) + m.rConn.AddRule(&nftables.Rule{ + Table: m.notrackOutputChain.Table, + Chain: m.notrackOutputChain, + Exprs: []expr.Any{ + &expr.Meta{Key: expr.MetaKeyOIFNAME, Register: 1}, + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: ifname("lo")}, + &expr.Payload{DestRegister: 1, Base: expr.PayloadBaseNetworkHeader, Offset: 12, Len: 4}, // saddr + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: loopback}, + &expr.Payload{DestRegister: 1, Base: expr.PayloadBaseNetworkHeader, Offset: 16, Len: 4}, // daddr + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: loopback}, + &expr.Meta{Key: expr.MetaKeyL4PROTO, Register: 1}, + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: []byte{unix.IPPROTO_UDP}}, + &expr.Payload{DestRegister: 1, Base: expr.PayloadBaseTransportHeader, Offset: 2, Len: 2}, + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: wgPortBytes}, // dport=wgPort + &expr.Counter{}, + &expr.Notrack{}, + }, + }) + + // Ingress rules: match incoming loopback UDP packets + m.rConn.AddRule(&nftables.Rule{ + Table: m.notrackPreroutingChain.Table, + Chain: m.notrackPreroutingChain, + Exprs: []expr.Any{ + &expr.Meta{Key: expr.MetaKeyIIFNAME, Register: 1}, + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: ifname("lo")}, + &expr.Payload{DestRegister: 1, Base: expr.PayloadBaseNetworkHeader, Offset: 12, Len: 4}, // saddr + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: loopback}, + &expr.Payload{DestRegister: 1, Base: expr.PayloadBaseNetworkHeader, Offset: 16, Len: 4}, // daddr + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: loopback}, + &expr.Meta{Key: expr.MetaKeyL4PROTO, Register: 1}, + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: []byte{unix.IPPROTO_UDP}}, + &expr.Payload{DestRegister: 1, Base: expr.PayloadBaseTransportHeader, Offset: 2, Len: 2}, + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: wgPortBytes}, // dport=wgPort + &expr.Counter{}, + &expr.Notrack{}, + }, + }) + m.rConn.AddRule(&nftables.Rule{ + Table: m.notrackPreroutingChain.Table, + Chain: m.notrackPreroutingChain, + Exprs: []expr.Any{ + &expr.Meta{Key: expr.MetaKeyIIFNAME, Register: 1}, + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: ifname("lo")}, + &expr.Payload{DestRegister: 1, Base: expr.PayloadBaseNetworkHeader, Offset: 12, Len: 4}, // saddr + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: loopback}, + &expr.Payload{DestRegister: 1, Base: expr.PayloadBaseNetworkHeader, Offset: 16, Len: 4}, // daddr + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: loopback}, + &expr.Meta{Key: expr.MetaKeyL4PROTO, Register: 1}, + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: []byte{unix.IPPROTO_UDP}}, + &expr.Payload{DestRegister: 1, Base: expr.PayloadBaseTransportHeader, Offset: 2, Len: 2}, + &expr.Cmp{Op: expr.CmpOpEq, Register: 1, Data: proxyPortBytes}, // dport=proxyPort + &expr.Counter{}, + &expr.Notrack{}, + }, + }) + + if err := m.rConn.Flush(); err != nil { + return fmt.Errorf("flush notrack rules: %w", err) + } + + log.Debugf("set up ebpf proxy notrack rules for ports %d,%d", proxyPort, wgPort) + return nil +} + +func (m *Manager) initNoTrackChains(table *nftables.Table) error { + m.notrackOutputChain = m.rConn.AddChain(&nftables.Chain{ + Name: chainNameRawOutput, + Table: table, + Type: nftables.ChainTypeFilter, + Hooknum: nftables.ChainHookOutput, + Priority: nftables.ChainPriorityRaw, + }) + + m.notrackPreroutingChain = m.rConn.AddChain(&nftables.Chain{ + Name: chainNameRawPrerouting, + Table: table, + Type: nftables.ChainTypeFilter, + Hooknum: nftables.ChainHookPrerouting, + Priority: nftables.ChainPriorityRaw, + }) + + if err := m.rConn.Flush(); err != nil { + return fmt.Errorf("flush chain creation: %w", err) + } + + return nil +} + +func (m *Manager) refreshNoTrackChains() error { + chains, err := m.rConn.ListChainsOfTableFamily(nftables.TableFamilyIPv4) + if err != nil { + return fmt.Errorf("list chains: %w", err) + } + + tableName := getTableName() + for _, c := range chains { + if c.Table.Name != tableName { + continue + } + switch c.Name { + case chainNameRawOutput: + m.notrackOutputChain = c + case chainNameRawPrerouting: + m.notrackPreroutingChain = c + } + } + + return nil +} + func (m *Manager) createWorkTable() (*nftables.Table, error) { tables, err := m.rConn.ListTablesOfFamily(nftables.TableFamilyIPv4) if err != nil { diff --git a/client/firewall/uspfilter/filter.go b/client/firewall/uspfilter/filter.go index 8caa1a0ad..aacc4ca1c 100644 --- a/client/firewall/uspfilter/filter.go +++ b/client/firewall/uspfilter/filter.go @@ -570,6 +570,14 @@ func (m *Manager) SetLegacyManagement(isLegacy bool) error { // Flush doesn't need to be implemented for this manager func (m *Manager) Flush() error { return nil } +// SetupEBPFProxyNoTrack creates notrack rules for eBPF proxy loopback traffic. +func (m *Manager) SetupEBPFProxyNoTrack(proxyPort, wgPort uint16) error { + if m.nativeFirewall == nil { + return nil + } + return m.nativeFirewall.SetupEBPFProxyNoTrack(proxyPort, wgPort) +} + // UpdateSet updates the rule destinations associated with the given set // by merging the existing prefixes with the new ones, then deduplicating. func (m *Manager) UpdateSet(set firewall.Set, prefixes []netip.Prefix) error { diff --git a/client/iface/bind/dual_stack_conn.go b/client/iface/bind/dual_stack_conn.go new file mode 100644 index 000000000..061016ecc --- /dev/null +++ b/client/iface/bind/dual_stack_conn.go @@ -0,0 +1,169 @@ +package bind + +import ( + "errors" + "net" + "sync" + "time" + + "github.com/hashicorp/go-multierror" + log "github.com/sirupsen/logrus" + + nberrors "github.com/netbirdio/netbird/client/errors" +) + +var ( + errNoIPv4Conn = errors.New("no IPv4 connection available") + errNoIPv6Conn = errors.New("no IPv6 connection available") + errInvalidAddr = errors.New("invalid address type") +) + +// DualStackPacketConn wraps IPv4 and IPv6 UDP connections and routes writes +// to the appropriate connection based on the destination address. +// ReadFrom is not used in the hot path - ICEBind receives packets via +// BatchReader.ReadBatch() directly. This is only used by udpMux for sending. +type DualStackPacketConn struct { + ipv4Conn net.PacketConn + ipv6Conn net.PacketConn + + readFromWarn sync.Once +} + +// NewDualStackPacketConn creates a new dual-stack packet connection. +func NewDualStackPacketConn(ipv4Conn, ipv6Conn net.PacketConn) *DualStackPacketConn { + return &DualStackPacketConn{ + ipv4Conn: ipv4Conn, + ipv6Conn: ipv6Conn, + } +} + +// ReadFrom reads from the available connection (preferring IPv4). +// NOTE: This method is NOT used in the data path. ICEBind receives packets via +// BatchReader.ReadBatch() directly for both IPv4 and IPv6, which is much more efficient. +// This implementation exists only to satisfy the net.PacketConn interface for the udpMux, +// but the udpMux only uses WriteTo() for sending STUN responses - it never calls ReadFrom() +// because STUN packets are filtered and forwarded via HandleSTUNMessage() from the receive path. +func (d *DualStackPacketConn) ReadFrom(b []byte) (n int, addr net.Addr, err error) { + d.readFromWarn.Do(func() { + log.Warn("DualStackPacketConn.ReadFrom called - this is unexpected and may indicate an inefficient code path") + }) + + if d.ipv4Conn != nil { + return d.ipv4Conn.ReadFrom(b) + } + if d.ipv6Conn != nil { + return d.ipv6Conn.ReadFrom(b) + } + return 0, nil, net.ErrClosed +} + +// WriteTo writes to the appropriate connection based on the address type. +func (d *DualStackPacketConn) WriteTo(b []byte, addr net.Addr) (n int, err error) { + udpAddr, ok := addr.(*net.UDPAddr) + if !ok { + return 0, &net.OpError{ + Op: "write", + Net: "udp", + Addr: addr, + Err: errInvalidAddr, + } + } + + if udpAddr.IP.To4() == nil { + if d.ipv6Conn != nil { + return d.ipv6Conn.WriteTo(b, addr) + } + return 0, &net.OpError{ + Op: "write", + Net: "udp6", + Addr: addr, + Err: errNoIPv6Conn, + } + } + + if d.ipv4Conn != nil { + return d.ipv4Conn.WriteTo(b, addr) + } + return 0, &net.OpError{ + Op: "write", + Net: "udp4", + Addr: addr, + Err: errNoIPv4Conn, + } +} + +// Close closes both connections. +func (d *DualStackPacketConn) Close() error { + var result *multierror.Error + if d.ipv4Conn != nil { + if err := d.ipv4Conn.Close(); err != nil { + result = multierror.Append(result, err) + } + } + if d.ipv6Conn != nil { + if err := d.ipv6Conn.Close(); err != nil { + result = multierror.Append(result, err) + } + } + return nberrors.FormatErrorOrNil(result) +} + +// LocalAddr returns the local address of the IPv4 connection if available, +// otherwise the IPv6 connection. +func (d *DualStackPacketConn) LocalAddr() net.Addr { + if d.ipv4Conn != nil { + return d.ipv4Conn.LocalAddr() + } + if d.ipv6Conn != nil { + return d.ipv6Conn.LocalAddr() + } + return nil +} + +// SetDeadline sets the deadline for both connections. +func (d *DualStackPacketConn) SetDeadline(t time.Time) error { + var result *multierror.Error + if d.ipv4Conn != nil { + if err := d.ipv4Conn.SetDeadline(t); err != nil { + result = multierror.Append(result, err) + } + } + if d.ipv6Conn != nil { + if err := d.ipv6Conn.SetDeadline(t); err != nil { + result = multierror.Append(result, err) + } + } + return nberrors.FormatErrorOrNil(result) +} + +// SetReadDeadline sets the read deadline for both connections. +func (d *DualStackPacketConn) SetReadDeadline(t time.Time) error { + var result *multierror.Error + if d.ipv4Conn != nil { + if err := d.ipv4Conn.SetReadDeadline(t); err != nil { + result = multierror.Append(result, err) + } + } + if d.ipv6Conn != nil { + if err := d.ipv6Conn.SetReadDeadline(t); err != nil { + result = multierror.Append(result, err) + } + } + return nberrors.FormatErrorOrNil(result) +} + +// SetWriteDeadline sets the write deadline for both connections. +func (d *DualStackPacketConn) SetWriteDeadline(t time.Time) error { + var result *multierror.Error + if d.ipv4Conn != nil { + if err := d.ipv4Conn.SetWriteDeadline(t); err != nil { + result = multierror.Append(result, err) + } + } + if d.ipv6Conn != nil { + if err := d.ipv6Conn.SetWriteDeadline(t); err != nil { + result = multierror.Append(result, err) + } + } + return nberrors.FormatErrorOrNil(result) +} diff --git a/client/iface/bind/dual_stack_conn_bench_test.go b/client/iface/bind/dual_stack_conn_bench_test.go new file mode 100644 index 000000000..940c44966 --- /dev/null +++ b/client/iface/bind/dual_stack_conn_bench_test.go @@ -0,0 +1,119 @@ +package bind + +import ( + "net" + "testing" +) + +var ( + ipv4Addr = &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 12345} + ipv6Addr = &net.UDPAddr{IP: net.ParseIP("::1"), Port: 12345} + payload = make([]byte, 1200) +) + +func BenchmarkWriteTo_DirectUDPConn(b *testing.B) { + conn, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.IPv4zero, Port: 0}) + if err != nil { + b.Fatal(err) + } + defer conn.Close() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = conn.WriteTo(payload, ipv4Addr) + } +} + +func BenchmarkWriteTo_DualStack_IPv4Only(b *testing.B) { + conn, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.IPv4zero, Port: 0}) + if err != nil { + b.Fatal(err) + } + defer conn.Close() + + ds := NewDualStackPacketConn(conn, nil) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = ds.WriteTo(payload, ipv4Addr) + } +} + +func BenchmarkWriteTo_DualStack_IPv6Only(b *testing.B) { + conn, err := net.ListenUDP("udp6", &net.UDPAddr{IP: net.IPv6zero, Port: 0}) + if err != nil { + b.Skipf("IPv6 not available: %v", err) + } + defer conn.Close() + + ds := NewDualStackPacketConn(nil, conn) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = ds.WriteTo(payload, ipv6Addr) + } +} + +func BenchmarkWriteTo_DualStack_Both_IPv4Traffic(b *testing.B) { + conn4, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.IPv4zero, Port: 0}) + if err != nil { + b.Fatal(err) + } + defer conn4.Close() + + conn6, err := net.ListenUDP("udp6", &net.UDPAddr{IP: net.IPv6zero, Port: 0}) + if err != nil { + b.Skipf("IPv6 not available: %v", err) + } + defer conn6.Close() + + ds := NewDualStackPacketConn(conn4, conn6) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = ds.WriteTo(payload, ipv4Addr) + } +} + +func BenchmarkWriteTo_DualStack_Both_IPv6Traffic(b *testing.B) { + conn4, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.IPv4zero, Port: 0}) + if err != nil { + b.Fatal(err) + } + defer conn4.Close() + + conn6, err := net.ListenUDP("udp6", &net.UDPAddr{IP: net.IPv6zero, Port: 0}) + if err != nil { + b.Skipf("IPv6 not available: %v", err) + } + defer conn6.Close() + + ds := NewDualStackPacketConn(conn4, conn6) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = ds.WriteTo(payload, ipv6Addr) + } +} + +func BenchmarkWriteTo_DualStack_Both_MixedTraffic(b *testing.B) { + conn4, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.IPv4zero, Port: 0}) + if err != nil { + b.Fatal(err) + } + defer conn4.Close() + + conn6, err := net.ListenUDP("udp6", &net.UDPAddr{IP: net.IPv6zero, Port: 0}) + if err != nil { + b.Skipf("IPv6 not available: %v", err) + } + defer conn6.Close() + + ds := NewDualStackPacketConn(conn4, conn6) + addrs := []net.Addr{ipv4Addr, ipv6Addr} + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = ds.WriteTo(payload, addrs[i&1]) + } +} diff --git a/client/iface/bind/dual_stack_conn_test.go b/client/iface/bind/dual_stack_conn_test.go new file mode 100644 index 000000000..3007d907f --- /dev/null +++ b/client/iface/bind/dual_stack_conn_test.go @@ -0,0 +1,191 @@ +package bind + +import ( + "net" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDualStackPacketConn_RoutesWritesToCorrectSocket(t *testing.T) { + ipv4Conn := &mockPacketConn{network: "udp4"} + ipv6Conn := &mockPacketConn{network: "udp6"} + dualStack := NewDualStackPacketConn(ipv4Conn, ipv6Conn) + + tests := []struct { + name string + addr *net.UDPAddr + wantSocket string + }{ + { + name: "IPv4 address", + addr: &net.UDPAddr{IP: net.ParseIP("192.168.1.1"), Port: 1234}, + wantSocket: "udp4", + }, + { + name: "IPv6 address", + addr: &net.UDPAddr{IP: net.ParseIP("2001:db8::1"), Port: 1234}, + wantSocket: "udp6", + }, + { + name: "IPv4-mapped IPv6 goes to IPv4", + addr: &net.UDPAddr{IP: net.ParseIP("::ffff:192.168.1.1"), Port: 1234}, + wantSocket: "udp4", + }, + { + name: "IPv4 loopback", + addr: &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 1234}, + wantSocket: "udp4", + }, + { + name: "IPv6 loopback", + addr: &net.UDPAddr{IP: net.ParseIP("::1"), Port: 1234}, + wantSocket: "udp6", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ipv4Conn.writeCount = 0 + ipv6Conn.writeCount = 0 + + n, err := dualStack.WriteTo([]byte("test"), tt.addr) + require.NoError(t, err) + assert.Equal(t, 4, n) + + if tt.wantSocket == "udp4" { + assert.Equal(t, 1, ipv4Conn.writeCount, "expected write to IPv4") + assert.Equal(t, 0, ipv6Conn.writeCount, "expected no write to IPv6") + } else { + assert.Equal(t, 0, ipv4Conn.writeCount, "expected no write to IPv4") + assert.Equal(t, 1, ipv6Conn.writeCount, "expected write to IPv6") + } + }) + } +} + +func TestDualStackPacketConn_IPv4OnlyRejectsIPv6(t *testing.T) { + dualStack := NewDualStackPacketConn(&mockPacketConn{network: "udp4"}, nil) + + // IPv4 works + _, err := dualStack.WriteTo([]byte("test"), &net.UDPAddr{IP: net.ParseIP("192.168.1.1"), Port: 1234}) + require.NoError(t, err) + + // IPv6 fails + _, err = dualStack.WriteTo([]byte("test"), &net.UDPAddr{IP: net.ParseIP("2001:db8::1"), Port: 1234}) + require.Error(t, err) + assert.Contains(t, err.Error(), "no IPv6 connection") +} + +func TestDualStackPacketConn_IPv6OnlyRejectsIPv4(t *testing.T) { + dualStack := NewDualStackPacketConn(nil, &mockPacketConn{network: "udp6"}) + + // IPv6 works + _, err := dualStack.WriteTo([]byte("test"), &net.UDPAddr{IP: net.ParseIP("2001:db8::1"), Port: 1234}) + require.NoError(t, err) + + // IPv4 fails + _, err = dualStack.WriteTo([]byte("test"), &net.UDPAddr{IP: net.ParseIP("192.168.1.1"), Port: 1234}) + require.Error(t, err) + assert.Contains(t, err.Error(), "no IPv4 connection") +} + +// TestDualStackPacketConn_ReadFromIsNotUsedInHotPath documents that ReadFrom +// only reads from one socket (IPv4 preferred). This is fine because the actual +// receive path uses wireguard-go's BatchReader directly, not ReadFrom. +func TestDualStackPacketConn_ReadFromIsNotUsedInHotPath(t *testing.T) { + ipv4Conn := &mockPacketConn{ + network: "udp4", + readData: []byte("from ipv4"), + readAddr: &net.UDPAddr{IP: net.ParseIP("192.168.1.1"), Port: 1234}, + } + ipv6Conn := &mockPacketConn{ + network: "udp6", + readData: []byte("from ipv6"), + readAddr: &net.UDPAddr{IP: net.ParseIP("2001:db8::1"), Port: 1234}, + } + + dualStack := NewDualStackPacketConn(ipv4Conn, ipv6Conn) + + buf := make([]byte, 100) + n, addr, err := dualStack.ReadFrom(buf) + + require.NoError(t, err) + // reads from IPv4 (preferred) - this is expected behavior + assert.Equal(t, "from ipv4", string(buf[:n])) + assert.Equal(t, "192.168.1.1", addr.(*net.UDPAddr).IP.String()) +} + +func TestDualStackPacketConn_LocalAddrPrefersIPv4(t *testing.T) { + ipv4Addr := &net.UDPAddr{IP: net.ParseIP("0.0.0.0"), Port: 51820} + ipv6Addr := &net.UDPAddr{IP: net.ParseIP("::"), Port: 51820} + + tests := []struct { + name string + ipv4 net.PacketConn + ipv6 net.PacketConn + wantAddr net.Addr + }{ + { + name: "both available returns IPv4", + ipv4: &mockPacketConn{localAddr: ipv4Addr}, + ipv6: &mockPacketConn{localAddr: ipv6Addr}, + wantAddr: ipv4Addr, + }, + { + name: "IPv4 only", + ipv4: &mockPacketConn{localAddr: ipv4Addr}, + ipv6: nil, + wantAddr: ipv4Addr, + }, + { + name: "IPv6 only", + ipv4: nil, + ipv6: &mockPacketConn{localAddr: ipv6Addr}, + wantAddr: ipv6Addr, + }, + { + name: "neither returns nil", + ipv4: nil, + ipv6: nil, + wantAddr: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + dualStack := NewDualStackPacketConn(tt.ipv4, tt.ipv6) + assert.Equal(t, tt.wantAddr, dualStack.LocalAddr()) + }) + } +} + +// mock + +type mockPacketConn struct { + network string + writeCount int + readData []byte + readAddr net.Addr + localAddr net.Addr +} + +func (m *mockPacketConn) ReadFrom(b []byte) (n int, addr net.Addr, err error) { + if m.readData != nil { + return copy(b, m.readData), m.readAddr, nil + } + return 0, nil, nil +} + +func (m *mockPacketConn) WriteTo(b []byte, addr net.Addr) (n int, err error) { + m.writeCount++ + return len(b), nil +} + +func (m *mockPacketConn) Close() error { return nil } +func (m *mockPacketConn) LocalAddr() net.Addr { return m.localAddr } +func (m *mockPacketConn) SetDeadline(t time.Time) error { return nil } +func (m *mockPacketConn) SetReadDeadline(t time.Time) error { return nil } +func (m *mockPacketConn) SetWriteDeadline(t time.Time) error { return nil } diff --git a/client/iface/bind/ice_bind.go b/client/iface/bind/ice_bind.go index 0957d2dd5..bf79ecd79 100644 --- a/client/iface/bind/ice_bind.go +++ b/client/iface/bind/ice_bind.go @@ -14,7 +14,6 @@ import ( "github.com/pion/stun/v3" "github.com/pion/transport/v3" log "github.com/sirupsen/logrus" - "golang.org/x/net/ipv4" "golang.org/x/net/ipv6" wgConn "golang.zx2c4.com/wireguard/conn" @@ -28,22 +27,7 @@ type receiverCreator struct { } func (rc receiverCreator) CreateReceiverFn(pc wgConn.BatchReader, conn *net.UDPConn, rxOffload bool, msgPool *sync.Pool) wgConn.ReceiveFunc { - if ipv4PC, ok := pc.(*ipv4.PacketConn); ok { - return rc.iceBind.createIPv4ReceiverFn(ipv4PC, conn, rxOffload, msgPool) - } - // IPv6 is currently not supported in the udpmux, this is a stub for compatibility with the - // wireguard-go ReceiverCreator interface which is called for both IPv4 and IPv6. - return func(bufs [][]byte, sizes []int, eps []wgConn.Endpoint) (n int, err error) { - buf := bufs[0] - size, ep, err := conn.ReadFromUDPAddrPort(buf) - if err != nil { - return 0, err - } - sizes[0] = size - stdEp := &wgConn.StdNetEndpoint{AddrPort: ep} - eps[0] = stdEp - return 1, nil - } + return rc.iceBind.createReceiverFn(pc, conn, rxOffload, msgPool) } // ICEBind is a bind implementation with two main features: @@ -73,6 +57,8 @@ type ICEBind struct { muUDPMux sync.Mutex udpMux *udpmux.UniversalUDPMuxDefault + ipv4Conn *net.UDPConn + ipv6Conn *net.UDPConn } func NewICEBind(transportNet transport.Net, filterFn udpmux.FilterFn, address wgaddr.Address, mtu uint16) *ICEBind { @@ -118,6 +104,12 @@ func (s *ICEBind) Close() error { close(s.closedChan) + s.muUDPMux.Lock() + s.ipv4Conn = nil + s.ipv6Conn = nil + s.udpMux = nil + s.muUDPMux.Unlock() + return s.StdNetBind.Close() } @@ -175,19 +167,18 @@ func (b *ICEBind) Send(bufs [][]byte, ep wgConn.Endpoint) error { return nil } -func (s *ICEBind) createIPv4ReceiverFn(pc *ipv4.PacketConn, conn *net.UDPConn, rxOffload bool, msgsPool *sync.Pool) wgConn.ReceiveFunc { +func (s *ICEBind) createReceiverFn(pc wgConn.BatchReader, conn *net.UDPConn, rxOffload bool, msgsPool *sync.Pool) wgConn.ReceiveFunc { s.muUDPMux.Lock() defer s.muUDPMux.Unlock() - s.udpMux = udpmux.NewUniversalUDPMuxDefault( - udpmux.UniversalUDPMuxParams{ - UDPConn: nbnet.WrapPacketConn(conn), - Net: s.transportNet, - FilterFn: s.filterFn, - WGAddress: s.address, - MTU: s.mtu, - }, - ) + // Detect IPv4 vs IPv6 from connection's local address + if localAddr := conn.LocalAddr().(*net.UDPAddr); localAddr.IP.To4() != nil { + s.ipv4Conn = conn + } else { + s.ipv6Conn = conn + } + s.createOrUpdateMux() + return func(bufs [][]byte, sizes []int, eps []wgConn.Endpoint) (n int, err error) { msgs := getMessages(msgsPool) for i := range bufs { @@ -195,12 +186,13 @@ func (s *ICEBind) createIPv4ReceiverFn(pc *ipv4.PacketConn, conn *net.UDPConn, r (*msgs)[i].OOB = (*msgs)[i].OOB[:cap((*msgs)[i].OOB)] } defer putMessages(msgs, msgsPool) + var numMsgs int if runtime.GOOS == "linux" || runtime.GOOS == "android" { if rxOffload { readAt := len(*msgs) - (wgConn.IdealBatchSize / wgConn.UdpSegmentMaxDatagrams) - //nolint - numMsgs, err = pc.ReadBatch((*msgs)[readAt:], 0) + //nolint:staticcheck + _, err = pc.ReadBatch((*msgs)[readAt:], 0) if err != nil { return 0, err } @@ -222,12 +214,12 @@ func (s *ICEBind) createIPv4ReceiverFn(pc *ipv4.PacketConn, conn *net.UDPConn, r } numMsgs = 1 } + for i := 0; i < numMsgs; i++ { msg := &(*msgs)[i] // todo: handle err - ok, _ := s.filterOutStunMessages(msg.Buffers, msg.N, msg.Addr) - if ok { + if ok, _ := s.filterOutStunMessages(msg.Buffers, msg.N, msg.Addr); ok { continue } sizes[i] = msg.N @@ -248,6 +240,38 @@ func (s *ICEBind) createIPv4ReceiverFn(pc *ipv4.PacketConn, conn *net.UDPConn, r } } +// createOrUpdateMux creates or updates the UDP mux with the available connections. +// Must be called with muUDPMux held. +func (s *ICEBind) createOrUpdateMux() { + var muxConn net.PacketConn + + switch { + case s.ipv4Conn != nil && s.ipv6Conn != nil: + muxConn = NewDualStackPacketConn( + nbnet.WrapPacketConn(s.ipv4Conn), + nbnet.WrapPacketConn(s.ipv6Conn), + ) + case s.ipv4Conn != nil: + muxConn = nbnet.WrapPacketConn(s.ipv4Conn) + case s.ipv6Conn != nil: + muxConn = nbnet.WrapPacketConn(s.ipv6Conn) + default: + return + } + + // Don't close the old mux - it doesn't own the underlying connections. + // The sockets are managed by WireGuard's StdNetBind, not by us. + s.udpMux = udpmux.NewUniversalUDPMuxDefault( + udpmux.UniversalUDPMuxParams{ + UDPConn: muxConn, + Net: s.transportNet, + FilterFn: s.filterFn, + WGAddress: s.address, + MTU: s.mtu, + }, + ) +} + func (s *ICEBind) filterOutStunMessages(buffers [][]byte, n int, addr net.Addr) (bool, error) { for i := range buffers { if !stun.IsMessage(buffers[i]) { @@ -260,9 +284,14 @@ func (s *ICEBind) filterOutStunMessages(buffers [][]byte, n int, addr net.Addr) return true, err } - muxErr := s.udpMux.HandleSTUNMessage(msg, addr) - if muxErr != nil { - log.Warnf("failed to handle STUN packet") + s.muUDPMux.Lock() + mux := s.udpMux + s.muUDPMux.Unlock() + + if mux != nil { + if muxErr := mux.HandleSTUNMessage(msg, addr); muxErr != nil { + log.Warnf("failed to handle STUN packet: %v", muxErr) + } } buffers[i] = []byte{} diff --git a/client/iface/bind/ice_bind_test.go b/client/iface/bind/ice_bind_test.go new file mode 100644 index 000000000..1fdd955c9 --- /dev/null +++ b/client/iface/bind/ice_bind_test.go @@ -0,0 +1,324 @@ +package bind + +import ( + "fmt" + "net" + "net/netip" + "sync" + "testing" + "time" + + "github.com/pion/transport/v3/stdnet" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" + + "github.com/netbirdio/netbird/client/iface/wgaddr" +) + +func TestICEBind_CreatesReceiverForBothIPv4AndIPv6(t *testing.T) { + iceBind := setupICEBind(t) + + ipv4Conn, ipv6Conn := createDualStackConns(t) + defer ipv4Conn.Close() + defer ipv6Conn.Close() + + rc := receiverCreator{iceBind} + pool := createMsgPool() + + // Simulate wireguard-go calling CreateReceiverFn for IPv4 + ipv4RecvFn := rc.CreateReceiverFn(ipv4.NewPacketConn(ipv4Conn), ipv4Conn, false, pool) + require.NotNil(t, ipv4RecvFn) + + iceBind.muUDPMux.Lock() + assert.NotNil(t, iceBind.ipv4Conn, "should store IPv4 connection") + assert.Nil(t, iceBind.ipv6Conn, "IPv6 not added yet") + assert.NotNil(t, iceBind.udpMux, "mux should be created after first connection") + iceBind.muUDPMux.Unlock() + + // Simulate wireguard-go calling CreateReceiverFn for IPv6 + ipv6RecvFn := rc.CreateReceiverFn(ipv6.NewPacketConn(ipv6Conn), ipv6Conn, false, pool) + require.NotNil(t, ipv6RecvFn) + + iceBind.muUDPMux.Lock() + assert.NotNil(t, iceBind.ipv4Conn, "should still have IPv4 connection") + assert.NotNil(t, iceBind.ipv6Conn, "should now have IPv6 connection") + assert.NotNil(t, iceBind.udpMux, "mux should still exist") + iceBind.muUDPMux.Unlock() + + mux, err := iceBind.GetICEMux() + require.NoError(t, err) + require.NotNil(t, mux) +} + +func TestICEBind_WorksWithIPv4Only(t *testing.T) { + iceBind := setupICEBind(t) + + ipv4Conn, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.IPv4zero, Port: 0}) + require.NoError(t, err) + defer ipv4Conn.Close() + + rc := receiverCreator{iceBind} + recvFn := rc.CreateReceiverFn(ipv4.NewPacketConn(ipv4Conn), ipv4Conn, false, createMsgPool()) + require.NotNil(t, recvFn) + + iceBind.muUDPMux.Lock() + assert.NotNil(t, iceBind.ipv4Conn) + assert.Nil(t, iceBind.ipv6Conn) + assert.NotNil(t, iceBind.udpMux) + iceBind.muUDPMux.Unlock() + + mux, err := iceBind.GetICEMux() + require.NoError(t, err) + require.NotNil(t, mux) +} + +func TestICEBind_WorksWithIPv6Only(t *testing.T) { + iceBind := setupICEBind(t) + + ipv6Conn, err := net.ListenUDP("udp6", &net.UDPAddr{IP: net.IPv6zero, Port: 0}) + if err != nil { + t.Skipf("IPv6 not available: %v", err) + } + defer ipv6Conn.Close() + + rc := receiverCreator{iceBind} + recvFn := rc.CreateReceiverFn(ipv6.NewPacketConn(ipv6Conn), ipv6Conn, false, createMsgPool()) + require.NotNil(t, recvFn) + + iceBind.muUDPMux.Lock() + assert.Nil(t, iceBind.ipv4Conn) + assert.NotNil(t, iceBind.ipv6Conn) + assert.NotNil(t, iceBind.udpMux) + iceBind.muUDPMux.Unlock() + + mux, err := iceBind.GetICEMux() + require.NoError(t, err) + require.NotNil(t, mux) +} + +// TestICEBind_SendsToIPv4AndIPv6PeersSimultaneously verifies that we can communicate +// with peers on different address families through the same DualStackPacketConn. +func TestICEBind_SendsToIPv4AndIPv6PeersSimultaneously(t *testing.T) { + // two "remote peers" listening on different address families + ipv4Peer := listenUDP(t, "udp4", "127.0.0.1:0") + defer ipv4Peer.Close() + + ipv6Peer, err := net.ListenUDP("udp6", &net.UDPAddr{IP: net.IPv6loopback, Port: 0}) + if err != nil { + t.Skipf("IPv6 not available: %v", err) + } + defer ipv6Peer.Close() + + // our local dual-stack connection + ipv4Local := listenUDP(t, "udp4", "127.0.0.1:0") + defer ipv4Local.Close() + + ipv6Local := listenUDP(t, "udp6", "[::1]:0") + defer ipv6Local.Close() + + dualStack := NewDualStackPacketConn(ipv4Local, ipv6Local) + + // send to both peers + _, err = dualStack.WriteTo([]byte("to-ipv4"), ipv4Peer.LocalAddr()) + require.NoError(t, err) + + _, err = dualStack.WriteTo([]byte("to-ipv6"), ipv6Peer.LocalAddr()) + require.NoError(t, err) + + // verify IPv4 peer got its packet from the IPv4 socket + buf := make([]byte, 100) + _ = ipv4Peer.SetReadDeadline(time.Now().Add(time.Second)) + n, addr, err := ipv4Peer.ReadFrom(buf) + require.NoError(t, err) + assert.Equal(t, "to-ipv4", string(buf[:n])) + assert.Equal(t, ipv4Local.LocalAddr().(*net.UDPAddr).Port, addr.(*net.UDPAddr).Port) + + // verify IPv6 peer got its packet from the IPv6 socket + _ = ipv6Peer.SetReadDeadline(time.Now().Add(time.Second)) + n, addr, err = ipv6Peer.ReadFrom(buf) + require.NoError(t, err) + assert.Equal(t, "to-ipv6", string(buf[:n])) + assert.Equal(t, ipv6Local.LocalAddr().(*net.UDPAddr).Port, addr.(*net.UDPAddr).Port) +} + +// TestICEBind_HandlesConcurrentMixedTraffic sends packets concurrently to both IPv4 +// and IPv6 peers. Verifies no packets get misrouted (IPv4 peer only gets v4- packets, +// IPv6 peer only gets v6- packets). Some packet loss is acceptable for UDP. +func TestICEBind_HandlesConcurrentMixedTraffic(t *testing.T) { + ipv4Peer := listenUDP(t, "udp4", "127.0.0.1:0") + defer ipv4Peer.Close() + + ipv6Peer, err := net.ListenUDP("udp6", &net.UDPAddr{IP: net.IPv6loopback, Port: 0}) + if err != nil { + t.Skipf("IPv6 not available: %v", err) + } + defer ipv6Peer.Close() + + ipv4Local := listenUDP(t, "udp4", "127.0.0.1:0") + defer ipv4Local.Close() + + ipv6Local := listenUDP(t, "udp6", "[::1]:0") + defer ipv6Local.Close() + + dualStack := NewDualStackPacketConn(ipv4Local, ipv6Local) + + const packetsPerFamily = 500 + + ipv4Received := make(chan string, packetsPerFamily) + ipv6Received := make(chan string, packetsPerFamily) + + startGate := make(chan struct{}) + var wg sync.WaitGroup + + wg.Add(1) + go func() { + defer wg.Done() + buf := make([]byte, 100) + for i := 0; i < packetsPerFamily; i++ { + n, _, err := ipv4Peer.ReadFrom(buf) + if err != nil { + return + } + ipv4Received <- string(buf[:n]) + } + }() + + wg.Add(1) + go func() { + defer wg.Done() + buf := make([]byte, 100) + for i := 0; i < packetsPerFamily; i++ { + n, _, err := ipv6Peer.ReadFrom(buf) + if err != nil { + return + } + ipv6Received <- string(buf[:n]) + } + }() + + wg.Add(1) + go func() { + defer wg.Done() + <-startGate + for i := 0; i < packetsPerFamily; i++ { + _, _ = dualStack.WriteTo([]byte(fmt.Sprintf("v4-%04d", i)), ipv4Peer.LocalAddr()) + } + }() + + wg.Add(1) + go func() { + defer wg.Done() + <-startGate + for i := 0; i < packetsPerFamily; i++ { + _, _ = dualStack.WriteTo([]byte(fmt.Sprintf("v6-%04d", i)), ipv6Peer.LocalAddr()) + } + }() + + close(startGate) + + time.AfterFunc(5*time.Second, func() { + _ = ipv4Peer.SetReadDeadline(time.Now()) + _ = ipv6Peer.SetReadDeadline(time.Now()) + }) + + wg.Wait() + close(ipv4Received) + close(ipv6Received) + + ipv4Count := 0 + for pkt := range ipv4Received { + require.True(t, len(pkt) >= 3 && pkt[:3] == "v4-", "IPv4 peer got misrouted packet: %s", pkt) + ipv4Count++ + } + + ipv6Count := 0 + for pkt := range ipv6Received { + require.True(t, len(pkt) >= 3 && pkt[:3] == "v6-", "IPv6 peer got misrouted packet: %s", pkt) + ipv6Count++ + } + + assert.Equal(t, packetsPerFamily, ipv4Count) + assert.Equal(t, packetsPerFamily, ipv6Count) +} + +func TestICEBind_DetectsAddressFamilyFromConnection(t *testing.T) { + tests := []struct { + name string + network string + addr string + wantIPv4 bool + }{ + {"IPv4 any", "udp4", "0.0.0.0:0", true}, + {"IPv4 loopback", "udp4", "127.0.0.1:0", true}, + {"IPv6 any", "udp6", "[::]:0", false}, + {"IPv6 loopback", "udp6", "[::1]:0", false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + addr, err := net.ResolveUDPAddr(tt.network, tt.addr) + require.NoError(t, err) + + conn, err := net.ListenUDP(tt.network, addr) + if err != nil { + t.Skipf("%s not available: %v", tt.network, err) + } + defer conn.Close() + + localAddr := conn.LocalAddr().(*net.UDPAddr) + isIPv4 := localAddr.IP.To4() != nil + assert.Equal(t, tt.wantIPv4, isIPv4) + }) + } +} + +// helpers + +func setupICEBind(t *testing.T) *ICEBind { + t.Helper() + transportNet, err := stdnet.NewNet() + require.NoError(t, err) + + address := wgaddr.Address{ + IP: netip.MustParseAddr("100.64.0.1"), + Network: netip.MustParsePrefix("100.64.0.0/10"), + } + return NewICEBind(transportNet, nil, address, 1280) +} + +func createDualStackConns(t *testing.T) (*net.UDPConn, *net.UDPConn) { + t.Helper() + ipv4Conn, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.IPv4zero, Port: 0}) + require.NoError(t, err) + + ipv6Conn, err := net.ListenUDP("udp6", &net.UDPAddr{IP: net.IPv6zero, Port: 0}) + if err != nil { + ipv4Conn.Close() + t.Skipf("IPv6 not available: %v", err) + } + return ipv4Conn, ipv6Conn +} + +func createMsgPool() *sync.Pool { + return &sync.Pool{ + New: func() any { + msgs := make([]ipv6.Message, 1) + for i := range msgs { + msgs[i].Buffers = make(net.Buffers, 1) + msgs[i].OOB = make([]byte, 0, 40) + } + return &msgs + }, + } +} + +func listenUDP(t *testing.T, network, addr string) *net.UDPConn { + t.Helper() + udpAddr, err := net.ResolveUDPAddr(network, addr) + require.NoError(t, err) + conn, err := net.ListenUDP(network, udpAddr) + require.NoError(t, err) + return conn +} diff --git a/client/iface/configurer/common.go b/client/iface/configurer/common.go index 088cff69d..10162d703 100644 --- a/client/iface/configurer/common.go +++ b/client/iface/configurer/common.go @@ -3,8 +3,22 @@ package configurer import ( "net" "net/netip" + + "golang.zx2c4.com/wireguard/wgctrl/wgtypes" ) +// buildPresharedKeyConfig creates a wgtypes.Config for setting a preshared key on a peer. +// This is a shared helper used by both kernel and userspace configurers. +func buildPresharedKeyConfig(peerKey wgtypes.Key, psk wgtypes.Key, updateOnly bool) wgtypes.Config { + return wgtypes.Config{ + Peers: []wgtypes.PeerConfig{{ + PublicKey: peerKey, + PresharedKey: &psk, + UpdateOnly: updateOnly, + }}, + } +} + func prefixesToIPNets(prefixes []netip.Prefix) []net.IPNet { ipNets := make([]net.IPNet, len(prefixes)) for i, prefix := range prefixes { diff --git a/client/iface/configurer/kernel_unix.go b/client/iface/configurer/kernel_unix.go index 96b286175..a29fe181a 100644 --- a/client/iface/configurer/kernel_unix.go +++ b/client/iface/configurer/kernel_unix.go @@ -15,8 +15,6 @@ import ( "github.com/netbirdio/netbird/monotime" ) -var zeroKey wgtypes.Key - type KernelConfigurer struct { deviceName string } @@ -48,6 +46,18 @@ func (c *KernelConfigurer) ConfigureInterface(privateKey string, port int) error return nil } +// SetPresharedKey sets the preshared key for a peer. +// If updateOnly is true, only updates the existing peer; if false, creates or updates. +func (c *KernelConfigurer) SetPresharedKey(peerKey string, psk wgtypes.Key, updateOnly bool) error { + parsedPeerKey, err := wgtypes.ParseKey(peerKey) + if err != nil { + return err + } + + cfg := buildPresharedKeyConfig(parsedPeerKey, psk, updateOnly) + return c.configure(cfg) +} + func (c *KernelConfigurer) UpdatePeer(peerKey string, allowedIps []netip.Prefix, keepAlive time.Duration, endpoint *net.UDPAddr, preSharedKey *wgtypes.Key) error { peerKeyParsed, err := wgtypes.ParseKey(peerKey) if err != nil { @@ -279,7 +289,7 @@ func (c *KernelConfigurer) FullStats() (*Stats, error) { TxBytes: p.TransmitBytes, RxBytes: p.ReceiveBytes, LastHandshake: p.LastHandshakeTime, - PresharedKey: p.PresharedKey != zeroKey, + PresharedKey: [32]byte(p.PresharedKey), } if p.Endpoint != nil { peer.Endpoint = *p.Endpoint diff --git a/client/iface/configurer/usp.go b/client/iface/configurer/usp.go index bc875b73c..c4ea349df 100644 --- a/client/iface/configurer/usp.go +++ b/client/iface/configurer/usp.go @@ -22,17 +22,16 @@ import ( ) const ( - privateKey = "private_key" - ipcKeyLastHandshakeTimeSec = "last_handshake_time_sec" - ipcKeyLastHandshakeTimeNsec = "last_handshake_time_nsec" - ipcKeyTxBytes = "tx_bytes" - ipcKeyRxBytes = "rx_bytes" - allowedIP = "allowed_ip" - endpoint = "endpoint" - fwmark = "fwmark" - listenPort = "listen_port" - publicKey = "public_key" - presharedKey = "preshared_key" + privateKey = "private_key" + ipcKeyLastHandshakeTimeSec = "last_handshake_time_sec" + ipcKeyTxBytes = "tx_bytes" + ipcKeyRxBytes = "rx_bytes" + allowedIP = "allowed_ip" + endpoint = "endpoint" + fwmark = "fwmark" + listenPort = "listen_port" + publicKey = "public_key" + presharedKey = "preshared_key" ) var ErrAllowedIPNotFound = fmt.Errorf("allowed IP not found") @@ -72,6 +71,18 @@ func (c *WGUSPConfigurer) ConfigureInterface(privateKey string, port int) error return c.device.IpcSet(toWgUserspaceString(config)) } +// SetPresharedKey sets the preshared key for a peer. +// If updateOnly is true, only updates the existing peer; if false, creates or updates. +func (c *WGUSPConfigurer) SetPresharedKey(peerKey string, psk wgtypes.Key, updateOnly bool) error { + parsedPeerKey, err := wgtypes.ParseKey(peerKey) + if err != nil { + return err + } + + cfg := buildPresharedKeyConfig(parsedPeerKey, psk, updateOnly) + return c.device.IpcSet(toWgUserspaceString(cfg)) +} + func (c *WGUSPConfigurer) UpdatePeer(peerKey string, allowedIps []netip.Prefix, keepAlive time.Duration, endpoint *net.UDPAddr, preSharedKey *wgtypes.Key) error { peerKeyParsed, err := wgtypes.ParseKey(peerKey) if err != nil { @@ -422,23 +433,19 @@ func toWgUserspaceString(wgCfg wgtypes.Config) string { hexKey := hex.EncodeToString(p.PublicKey[:]) sb.WriteString(fmt.Sprintf("public_key=%s\n", hexKey)) + if p.Remove { + sb.WriteString("remove=true\n") + } + + if p.UpdateOnly { + sb.WriteString("update_only=true\n") + } + if p.PresharedKey != nil { preSharedHexKey := hex.EncodeToString(p.PresharedKey[:]) sb.WriteString(fmt.Sprintf("preshared_key=%s\n", preSharedHexKey)) } - if p.Remove { - sb.WriteString("remove=true") - } - - if p.ReplaceAllowedIPs { - sb.WriteString("replace_allowed_ips=true\n") - } - - for _, aip := range p.AllowedIPs { - sb.WriteString(fmt.Sprintf("allowed_ip=%s\n", aip.String())) - } - if p.Endpoint != nil { sb.WriteString(fmt.Sprintf("endpoint=%s\n", p.Endpoint.String())) } @@ -446,6 +453,14 @@ func toWgUserspaceString(wgCfg wgtypes.Config) string { if p.PersistentKeepaliveInterval != nil { sb.WriteString(fmt.Sprintf("persistent_keepalive_interval=%d\n", int(p.PersistentKeepaliveInterval.Seconds()))) } + + if p.ReplaceAllowedIPs { + sb.WriteString("replace_allowed_ips=true\n") + } + + for _, aip := range p.AllowedIPs { + sb.WriteString(fmt.Sprintf("allowed_ip=%s\n", aip.String())) + } } return sb.String() } @@ -599,7 +614,9 @@ func parseStatus(deviceName, ipcStr string) (*Stats, error) { continue } if val != "" && val != "0000000000000000000000000000000000000000000000000000000000000000" { - currentPeer.PresharedKey = true + if pskKey, err := hexToWireguardKey(val); err == nil { + currentPeer.PresharedKey = [32]byte(pskKey) + } } } } diff --git a/client/iface/configurer/wgshow.go b/client/iface/configurer/wgshow.go index 604264026..4a5c31160 100644 --- a/client/iface/configurer/wgshow.go +++ b/client/iface/configurer/wgshow.go @@ -12,7 +12,7 @@ type Peer struct { TxBytes int64 RxBytes int64 LastHandshake time.Time - PresharedKey bool + PresharedKey [32]byte } type Stats struct { diff --git a/client/iface/device/interface.go b/client/iface/device/interface.go index db53d9c3a..7bab7b757 100644 --- a/client/iface/device/interface.go +++ b/client/iface/device/interface.go @@ -17,6 +17,7 @@ type WGConfigurer interface { RemovePeer(peerKey string) error AddAllowedIP(peerKey string, allowedIP netip.Prefix) error RemoveAllowedIP(peerKey string, allowedIP netip.Prefix) error + SetPresharedKey(peerKey string, psk wgtypes.Key, updateOnly bool) error Close() GetStats() (map[string]configurer.WGStats, error) FullStats() (*configurer.Stats, error) diff --git a/client/iface/iface.go b/client/iface/iface.go index 07235a995..e5623c979 100644 --- a/client/iface/iface.go +++ b/client/iface/iface.go @@ -50,6 +50,7 @@ func ValidateMTU(mtu uint16) error { type wgProxyFactory interface { GetProxy() wgproxy.Proxy + GetProxyPort() uint16 Free() error } @@ -80,6 +81,12 @@ func (w *WGIface) GetProxy() wgproxy.Proxy { return w.wgProxyFactory.GetProxy() } +// GetProxyPort returns the proxy port used by the WireGuard proxy. +// Returns 0 if no proxy port is used (e.g., for userspace WireGuard). +func (w *WGIface) GetProxyPort() uint16 { + return w.wgProxyFactory.GetProxyPort() +} + // GetBind returns the EndpointManager userspace bind mode. func (w *WGIface) GetBind() device.EndpointManager { w.mu.Lock() @@ -297,6 +304,19 @@ func (w *WGIface) FullStats() (*configurer.Stats, error) { return w.configurer.FullStats() } +// SetPresharedKey sets or updates the preshared key for a peer. +// If updateOnly is true, only updates existing peer; if false, creates or updates. +func (w *WGIface) SetPresharedKey(peerKey string, psk wgtypes.Key, updateOnly bool) error { + w.mu.Lock() + defer w.mu.Unlock() + + if w.configurer == nil { + return ErrIfaceNotFound + } + + return w.configurer.SetPresharedKey(peerKey, psk, updateOnly) +} + func (w *WGIface) waitUntilRemoved() error { maxWaitTime := 5 * time.Second timeout := time.NewTimer(maxWaitTime) diff --git a/client/iface/wgproxy/bind/proxy.go b/client/iface/wgproxy/bind/proxy.go index eb585d8a2..9ac3ea6df 100644 --- a/client/iface/wgproxy/bind/proxy.go +++ b/client/iface/wgproxy/bind/proxy.go @@ -114,21 +114,21 @@ func (p *ProxyBind) Pause() { } func (p *ProxyBind) RedirectAs(endpoint *net.UDPAddr) { + ep, err := addrToEndpoint(endpoint) + if err != nil { + log.Errorf("failed to start package redirection: %v", err) + return + } + p.pausedCond.L.Lock() p.paused = false - p.wgCurrentUsed = addrToEndpoint(endpoint) + p.wgCurrentUsed = ep p.pausedCond.Signal() p.pausedCond.L.Unlock() } -func addrToEndpoint(addr *net.UDPAddr) *bind.Endpoint { - ip, _ := netip.AddrFromSlice(addr.IP.To4()) - addrPort := netip.AddrPortFrom(ip, uint16(addr.Port)) - return &bind.Endpoint{AddrPort: addrPort} -} - func (p *ProxyBind) CloseConn() error { if p.cancel == nil { return fmt.Errorf("proxy not started") @@ -212,3 +212,16 @@ func fakeAddress(peerAddress *net.UDPAddr) (*netip.AddrPort, error) { netipAddr := netip.AddrPortFrom(fakeIP, uint16(peerAddress.Port)) return &netipAddr, nil } + +func addrToEndpoint(addr *net.UDPAddr) (*bind.Endpoint, error) { + if addr == nil { + return nil, fmt.Errorf("invalid address") + } + ip, ok := netip.AddrFromSlice(addr.IP) + if !ok { + return nil, fmt.Errorf("convert %s to netip.Addr", addr) + } + + addrPort := netip.AddrPortFrom(ip.Unmap(), uint16(addr.Port)) + return &bind.Endpoint{AddrPort: addrPort}, nil +} diff --git a/client/iface/wgproxy/ebpf/proxy.go b/client/iface/wgproxy/ebpf/proxy.go index 858143091..5458519fa 100644 --- a/client/iface/wgproxy/ebpf/proxy.go +++ b/client/iface/wgproxy/ebpf/proxy.go @@ -27,12 +27,19 @@ const ( ) var ( - localHostNetIP = net.ParseIP("127.0.0.1") + localHostNetIPv4 = net.ParseIP("127.0.0.1") + localHostNetIPv6 = net.ParseIP("::1") + + serializeOpts = gopacket.SerializeOptions{ + ComputeChecksums: true, + FixLengths: true, + } ) // WGEBPFProxy definition for proxy with EBPF support type WGEBPFProxy struct { localWGListenPort int + proxyPort int mtu uint16 ebpfManager ebpfMgr.Manager @@ -40,7 +47,8 @@ type WGEBPFProxy struct { turnConnMutex sync.Mutex lastUsedPort uint16 - rawConn net.PacketConn + rawConnIPv4 net.PacketConn + rawConnIPv6 net.PacketConn conn transport.UDPConn ctx context.Context @@ -62,23 +70,39 @@ func NewWGEBPFProxy(wgPort int, mtu uint16) *WGEBPFProxy { // Listen load ebpf program and listen the proxy func (p *WGEBPFProxy) Listen() error { pl := portLookup{} - wgPorxyPort, err := pl.searchFreePort() + proxyPort, err := pl.searchFreePort() + if err != nil { + return err + } + p.proxyPort = proxyPort + + // Prepare IPv4 raw socket (required) + p.rawConnIPv4, err = rawsocket.PrepareSenderRawSocketIPv4() if err != nil { return err } - p.rawConn, err = rawsocket.PrepareSenderRawSocket() + // Prepare IPv6 raw socket (optional) + p.rawConnIPv6, err = rawsocket.PrepareSenderRawSocketIPv6() if err != nil { - return err + log.Warnf("failed to prepare IPv6 raw socket, continuing with IPv4 only: %v", err) } - err = p.ebpfManager.LoadWgProxy(wgPorxyPort, p.localWGListenPort) + err = p.ebpfManager.LoadWgProxy(proxyPort, p.localWGListenPort) if err != nil { + if closeErr := p.rawConnIPv4.Close(); closeErr != nil { + log.Warnf("failed to close IPv4 raw socket: %v", closeErr) + } + if p.rawConnIPv6 != nil { + if closeErr := p.rawConnIPv6.Close(); closeErr != nil { + log.Warnf("failed to close IPv6 raw socket: %v", closeErr) + } + } return err } addr := net.UDPAddr{ - Port: wgPorxyPort, + Port: proxyPort, IP: net.ParseIP(loopbackAddr), } @@ -94,7 +118,7 @@ func (p *WGEBPFProxy) Listen() error { p.conn = conn go p.proxyToRemote() - log.Infof("local wg proxy listening on: %d", wgPorxyPort) + log.Infof("local wg proxy listening on: %d", proxyPort) return nil } @@ -135,12 +159,25 @@ func (p *WGEBPFProxy) Free() error { result = multierror.Append(result, err) } - if err := p.rawConn.Close(); err != nil { - result = multierror.Append(result, err) + if p.rawConnIPv4 != nil { + if err := p.rawConnIPv4.Close(); err != nil { + result = multierror.Append(result, err) + } + } + + if p.rawConnIPv6 != nil { + if err := p.rawConnIPv6.Close(); err != nil { + result = multierror.Append(result, err) + } } return nberrors.FormatErrorOrNil(result) } +// GetProxyPort returns the proxy listening port. +func (p *WGEBPFProxy) GetProxyPort() uint16 { + return uint16(p.proxyPort) +} + // proxyToRemote read messages from local WireGuard interface and forward it to remote conn // From this go routine has only one instance. func (p *WGEBPFProxy) proxyToRemote() { @@ -218,31 +255,60 @@ generatePort: } func (p *WGEBPFProxy) sendPkg(data []byte, endpointAddr *net.UDPAddr) error { - payload := gopacket.Payload(data) - ipH := &layers.IPv4{ - DstIP: localHostNetIP, - SrcIP: endpointAddr.IP, - Version: 4, - TTL: 64, - Protocol: layers.IPProtocolUDP, + + var ipH gopacket.SerializableLayer + var networkLayer gopacket.NetworkLayer + var dstIP net.IP + var rawConn net.PacketConn + + if endpointAddr.IP.To4() != nil { + // IPv4 path + ipv4 := &layers.IPv4{ + DstIP: localHostNetIPv4, + SrcIP: endpointAddr.IP, + Version: 4, + TTL: 64, + Protocol: layers.IPProtocolUDP, + } + ipH = ipv4 + networkLayer = ipv4 + dstIP = localHostNetIPv4 + rawConn = p.rawConnIPv4 + } else { + // IPv6 path + if p.rawConnIPv6 == nil { + return fmt.Errorf("IPv6 raw socket not available") + } + ipv6 := &layers.IPv6{ + DstIP: localHostNetIPv6, + SrcIP: endpointAddr.IP, + Version: 6, + HopLimit: 64, + NextHeader: layers.IPProtocolUDP, + } + ipH = ipv6 + networkLayer = ipv6 + dstIP = localHostNetIPv6 + rawConn = p.rawConnIPv6 } + udpH := &layers.UDP{ SrcPort: layers.UDPPort(endpointAddr.Port), DstPort: layers.UDPPort(p.localWGListenPort), } - err := udpH.SetNetworkLayerForChecksum(ipH) - if err != nil { + if err := udpH.SetNetworkLayerForChecksum(networkLayer); err != nil { return fmt.Errorf("set network layer for checksum: %w", err) } layerBuffer := gopacket.NewSerializeBuffer() + payload := gopacket.Payload(data) - err = gopacket.SerializeLayers(layerBuffer, gopacket.SerializeOptions{ComputeChecksums: true, FixLengths: true}, ipH, udpH, payload) - if err != nil { + if err := gopacket.SerializeLayers(layerBuffer, serializeOpts, ipH, udpH, payload); err != nil { return fmt.Errorf("serialize layers: %w", err) } - if _, err = p.rawConn.WriteTo(layerBuffer.Bytes(), &net.IPAddr{IP: localHostNetIP}); err != nil { + + if _, err := rawConn.WriteTo(layerBuffer.Bytes(), &net.IPAddr{IP: dstIP}); err != nil { return fmt.Errorf("write to raw conn: %w", err) } return nil diff --git a/client/iface/wgproxy/ebpf/wrapper.go b/client/iface/wgproxy/ebpf/wrapper.go index ff44d30c0..5b98be7b4 100644 --- a/client/iface/wgproxy/ebpf/wrapper.go +++ b/client/iface/wgproxy/ebpf/wrapper.go @@ -41,7 +41,7 @@ func NewProxyWrapper(proxy *WGEBPFProxy) *ProxyWrapper { closeListener: listener.NewCloseListener(), } } -func (p *ProxyWrapper) AddTurnConn(ctx context.Context, endpoint *net.UDPAddr, remoteConn net.Conn) error { +func (p *ProxyWrapper) AddTurnConn(ctx context.Context, _ *net.UDPAddr, remoteConn net.Conn) error { addr, err := p.wgeBPFProxy.AddTurnConn(remoteConn) if err != nil { return fmt.Errorf("add turn conn: %w", err) @@ -91,6 +91,10 @@ func (p *ProxyWrapper) Pause() { } func (p *ProxyWrapper) RedirectAs(endpoint *net.UDPAddr) { + if endpoint == nil || endpoint.IP == nil { + log.Errorf("failed to start package redirection, endpoint is nil") + return + } p.pausedCond.L.Lock() p.paused = false diff --git a/client/iface/wgproxy/factory_kernel.go b/client/iface/wgproxy/factory_kernel.go index 2714c5774..7821df3de 100644 --- a/client/iface/wgproxy/factory_kernel.go +++ b/client/iface/wgproxy/factory_kernel.go @@ -54,6 +54,14 @@ func (w *KernelFactory) GetProxy() Proxy { return ebpf.NewProxyWrapper(w.ebpfProxy) } +// GetProxyPort returns the eBPF proxy port, or 0 if eBPF is not active. +func (w *KernelFactory) GetProxyPort() uint16 { + if w.ebpfProxy == nil { + return 0 + } + return w.ebpfProxy.GetProxyPort() +} + func (w *KernelFactory) Free() error { if w.ebpfProxy == nil { return nil diff --git a/client/iface/wgproxy/factory_usp.go b/client/iface/wgproxy/factory_usp.go index a1b1c34d7..bbd67e076 100644 --- a/client/iface/wgproxy/factory_usp.go +++ b/client/iface/wgproxy/factory_usp.go @@ -24,6 +24,11 @@ func (w *USPFactory) GetProxy() Proxy { return proxyBind.NewProxyBind(w.bind, w.mtu) } +// GetProxyPort returns 0 as userspace WireGuard doesn't use a separate proxy port. +func (w *USPFactory) GetProxyPort() uint16 { + return 0 +} + func (w *USPFactory) Free() error { return nil } diff --git a/client/iface/wgproxy/rawsocket/rawsocket.go b/client/iface/wgproxy/rawsocket/rawsocket.go index a11ac46d5..bc785b43a 100644 --- a/client/iface/wgproxy/rawsocket/rawsocket.go +++ b/client/iface/wgproxy/rawsocket/rawsocket.go @@ -8,43 +8,87 @@ import ( "os" "syscall" + log "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" + nbnet "github.com/netbirdio/netbird/client/net" ) -func PrepareSenderRawSocket() (net.PacketConn, error) { +// PrepareSenderRawSocketIPv4 creates and configures a raw socket for sending IPv4 packets +func PrepareSenderRawSocketIPv4() (net.PacketConn, error) { + return prepareSenderRawSocket(syscall.AF_INET, true) +} + +// PrepareSenderRawSocketIPv6 creates and configures a raw socket for sending IPv6 packets +func PrepareSenderRawSocketIPv6() (net.PacketConn, error) { + return prepareSenderRawSocket(syscall.AF_INET6, false) +} + +func prepareSenderRawSocket(family int, isIPv4 bool) (net.PacketConn, error) { // Create a raw socket. - fd, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_RAW, syscall.IPPROTO_RAW) + fd, err := syscall.Socket(family, syscall.SOCK_RAW, syscall.IPPROTO_RAW) if err != nil { return nil, fmt.Errorf("creating raw socket failed: %w", err) } - // Set the IP_HDRINCL option on the socket to tell the kernel that headers are included in the packet. - err = syscall.SetsockoptInt(fd, syscall.IPPROTO_IP, syscall.IP_HDRINCL, 1) - if err != nil { - return nil, fmt.Errorf("setting IP_HDRINCL failed: %w", err) + // Set the header include option on the socket to tell the kernel that headers are included in the packet. + // For IPv4, we need to set IP_HDRINCL. For IPv6, we need to set IPV6_HDRINCL to accept application-provided IPv6 headers. + if isIPv4 { + err = syscall.SetsockoptInt(fd, syscall.IPPROTO_IP, unix.IP_HDRINCL, 1) + if err != nil { + if closeErr := syscall.Close(fd); closeErr != nil { + log.Warnf("failed to close raw socket fd: %v", closeErr) + } + return nil, fmt.Errorf("setting IP_HDRINCL failed: %w", err) + } + } else { + err = syscall.SetsockoptInt(fd, syscall.IPPROTO_IPV6, unix.IPV6_HDRINCL, 1) + if err != nil { + if closeErr := syscall.Close(fd); closeErr != nil { + log.Warnf("failed to close raw socket fd: %v", closeErr) + } + return nil, fmt.Errorf("setting IPV6_HDRINCL failed: %w", err) + } } // Bind the socket to the "lo" interface. err = syscall.SetsockoptString(fd, syscall.SOL_SOCKET, syscall.SO_BINDTODEVICE, "lo") if err != nil { + if closeErr := syscall.Close(fd); closeErr != nil { + log.Warnf("failed to close raw socket fd: %v", closeErr) + } return nil, fmt.Errorf("binding to lo interface failed: %w", err) } // Set the fwmark on the socket. err = nbnet.SetSocketOpt(fd) if err != nil { + if closeErr := syscall.Close(fd); closeErr != nil { + log.Warnf("failed to close raw socket fd: %v", closeErr) + } return nil, fmt.Errorf("setting fwmark failed: %w", err) } // Convert the file descriptor to a PacketConn. file := os.NewFile(uintptr(fd), fmt.Sprintf("fd %d", fd)) if file == nil { + if closeErr := syscall.Close(fd); closeErr != nil { + log.Warnf("failed to close raw socket fd: %v", closeErr) + } return nil, fmt.Errorf("converting fd to file failed") } packetConn, err := net.FilePacketConn(file) if err != nil { + if closeErr := file.Close(); closeErr != nil { + log.Warnf("failed to close file: %v", closeErr) + } return nil, fmt.Errorf("converting file to packet conn failed: %w", err) } + // Close the original file to release the FD (net.FilePacketConn duplicates it) + if closeErr := file.Close(); closeErr != nil { + log.Warnf("failed to close file after creating packet conn: %v", closeErr) + } + return packetConn, nil } diff --git a/client/iface/wgproxy/redirect_test.go b/client/iface/wgproxy/redirect_test.go new file mode 100644 index 000000000..b52eead25 --- /dev/null +++ b/client/iface/wgproxy/redirect_test.go @@ -0,0 +1,353 @@ +//go:build linux && !android + +package wgproxy + +import ( + "context" + "net" + "testing" + "time" + + "github.com/netbirdio/netbird/client/iface/wgproxy/ebpf" + "github.com/netbirdio/netbird/client/iface/wgproxy/udp" +) + +// compareUDPAddr compares two UDP addresses, ignoring IPv6 zone IDs +// IPv6 link-local addresses include zone IDs (e.g., fe80::1%lo) which we should ignore +func compareUDPAddr(addr1, addr2 net.Addr) bool { + udpAddr1, ok1 := addr1.(*net.UDPAddr) + udpAddr2, ok2 := addr2.(*net.UDPAddr) + + if !ok1 || !ok2 { + return addr1.String() == addr2.String() + } + + // Compare IP and Port, ignoring zone + return udpAddr1.IP.Equal(udpAddr2.IP) && udpAddr1.Port == udpAddr2.Port +} + +// TestRedirectAs_eBPF_IPv4 tests RedirectAs with eBPF proxy using IPv4 addresses +func TestRedirectAs_eBPF_IPv4(t *testing.T) { + wgPort := 51850 + ebpfProxy := ebpf.NewWGEBPFProxy(wgPort, 1280) + if err := ebpfProxy.Listen(); err != nil { + t.Fatalf("failed to initialize ebpf proxy: %v", err) + } + defer func() { + if err := ebpfProxy.Free(); err != nil { + t.Errorf("failed to free ebpf proxy: %v", err) + } + }() + + proxy := ebpf.NewProxyWrapper(ebpfProxy) + + // NetBird UDP address of the remote peer + nbAddr := &net.UDPAddr{ + IP: net.ParseIP("100.108.111.177"), + Port: 38746, + } + + p2pEndpoint := &net.UDPAddr{ + IP: net.ParseIP("192.168.0.56"), + Port: 51820, + } + + testRedirectAs(t, proxy, wgPort, nbAddr, p2pEndpoint) +} + +// TestRedirectAs_eBPF_IPv6 tests RedirectAs with eBPF proxy using IPv6 addresses +func TestRedirectAs_eBPF_IPv6(t *testing.T) { + wgPort := 51851 + ebpfProxy := ebpf.NewWGEBPFProxy(wgPort, 1280) + if err := ebpfProxy.Listen(); err != nil { + t.Fatalf("failed to initialize ebpf proxy: %v", err) + } + defer func() { + if err := ebpfProxy.Free(); err != nil { + t.Errorf("failed to free ebpf proxy: %v", err) + } + }() + + proxy := ebpf.NewProxyWrapper(ebpfProxy) + + // NetBird UDP address of the remote peer + nbAddr := &net.UDPAddr{ + IP: net.ParseIP("100.108.111.177"), + Port: 38746, + } + + p2pEndpoint := &net.UDPAddr{ + IP: net.ParseIP("fe80::56"), + Port: 51820, + } + + testRedirectAs(t, proxy, wgPort, nbAddr, p2pEndpoint) +} + +// TestRedirectAs_UDP_IPv4 tests RedirectAs with UDP proxy using IPv4 addresses +func TestRedirectAs_UDP_IPv4(t *testing.T) { + wgPort := 51852 + proxy := udp.NewWGUDPProxy(wgPort, 1280) + + // NetBird UDP address of the remote peer + nbAddr := &net.UDPAddr{ + IP: net.ParseIP("100.108.111.177"), + Port: 38746, + } + + p2pEndpoint := &net.UDPAddr{ + IP: net.ParseIP("192.168.0.56"), + Port: 51820, + } + + testRedirectAs(t, proxy, wgPort, nbAddr, p2pEndpoint) +} + +// TestRedirectAs_UDP_IPv6 tests RedirectAs with UDP proxy using IPv6 addresses +func TestRedirectAs_UDP_IPv6(t *testing.T) { + wgPort := 51853 + proxy := udp.NewWGUDPProxy(wgPort, 1280) + + // NetBird UDP address of the remote peer + nbAddr := &net.UDPAddr{ + IP: net.ParseIP("100.108.111.177"), + Port: 38746, + } + + p2pEndpoint := &net.UDPAddr{ + IP: net.ParseIP("fe80::56"), + Port: 51820, + } + + testRedirectAs(t, proxy, wgPort, nbAddr, p2pEndpoint) +} + +// testRedirectAs is a helper function that tests the RedirectAs functionality +// It verifies that: +// 1. Initial traffic from relay connection works +// 2. After calling RedirectAs, packets appear to come from the p2p endpoint +// 3. Multiple packets are correctly redirected with the new source address +func testRedirectAs(t *testing.T, proxy Proxy, wgPort int, nbAddr, p2pEndpoint *net.UDPAddr) { + t.Helper() + + ctx := context.Background() + + // Create WireGuard listeners on both IPv4 and IPv6 to support both P2P connection types + // In reality, WireGuard binds to a port and receives from both IPv4 and IPv6 + wgListener4, err := net.ListenUDP("udp4", &net.UDPAddr{ + IP: net.ParseIP("127.0.0.1"), + Port: wgPort, + }) + if err != nil { + t.Fatalf("failed to create IPv4 WireGuard listener: %v", err) + } + defer wgListener4.Close() + + wgListener6, err := net.ListenUDP("udp6", &net.UDPAddr{ + IP: net.ParseIP("::1"), + Port: wgPort, + }) + if err != nil { + t.Fatalf("failed to create IPv6 WireGuard listener: %v", err) + } + defer wgListener6.Close() + + // Determine which listener to use based on the NetBird address IP version + // (this is where initial traffic will come from before RedirectAs is called) + var wgListener *net.UDPConn + if p2pEndpoint.IP.To4() == nil { + wgListener = wgListener6 + } else { + wgListener = wgListener4 + } + + // Create relay server and connection + relayServer, err := net.ListenUDP("udp", &net.UDPAddr{ + IP: net.ParseIP("127.0.0.1"), + Port: 0, // Random port + }) + if err != nil { + t.Fatalf("failed to create relay server: %v", err) + } + defer relayServer.Close() + + relayConn, err := net.Dial("udp", relayServer.LocalAddr().String()) + if err != nil { + t.Fatalf("failed to create relay connection: %v", err) + } + defer relayConn.Close() + + // Add TURN connection to proxy + if err := proxy.AddTurnConn(ctx, nbAddr, relayConn); err != nil { + t.Fatalf("failed to add TURN connection: %v", err) + } + defer func() { + if err := proxy.CloseConn(); err != nil { + t.Errorf("failed to close proxy connection: %v", err) + } + }() + + // Start the proxy + proxy.Work() + + // Phase 1: Test initial relay traffic + msgFromRelay := []byte("hello from relay") + if _, err := relayServer.WriteTo(msgFromRelay, relayConn.LocalAddr()); err != nil { + t.Fatalf("failed to write to relay server: %v", err) + } + + // Set read deadline to avoid hanging + if err := wgListener4.SetReadDeadline(time.Now().Add(2 * time.Second)); err != nil { + t.Fatalf("failed to set read deadline: %v", err) + } + + buf := make([]byte, 1024) + n, _, err := wgListener4.ReadFrom(buf) + if err != nil { + t.Fatalf("failed to read from WireGuard listener: %v", err) + } + + if n != len(msgFromRelay) { + t.Errorf("expected %d bytes, got %d", len(msgFromRelay), n) + } + + if string(buf[:n]) != string(msgFromRelay) { + t.Errorf("expected message %q, got %q", msgFromRelay, buf[:n]) + } + + // Phase 2: Redirect to p2p endpoint + proxy.RedirectAs(p2pEndpoint) + + // Give the proxy a moment to process the redirect + time.Sleep(100 * time.Millisecond) + + // Phase 3: Test redirected traffic + redirectedMessages := [][]byte{ + []byte("redirected message 1"), + []byte("redirected message 2"), + []byte("redirected message 3"), + } + + for i, msg := range redirectedMessages { + if _, err := relayServer.WriteTo(msg, relayConn.LocalAddr()); err != nil { + t.Fatalf("failed to write redirected message %d: %v", i+1, err) + } + + if err := wgListener.SetReadDeadline(time.Now().Add(2 * time.Second)); err != nil { + t.Fatalf("failed to set read deadline: %v", err) + } + + n, srcAddr, err := wgListener.ReadFrom(buf) + if err != nil { + t.Fatalf("failed to read redirected message %d: %v", i+1, err) + } + + // Verify message content + if string(buf[:n]) != string(msg) { + t.Errorf("message %d: expected %q, got %q", i+1, msg, buf[:n]) + } + + // Verify source address matches p2p endpoint (this is the key test) + // Use compareUDPAddr to ignore IPv6 zone IDs + if !compareUDPAddr(srcAddr, p2pEndpoint) { + t.Errorf("message %d: expected source address %s, got %s", + i+1, p2pEndpoint.String(), srcAddr.String()) + } + } +} + +// TestRedirectAs_Multiple_Switches tests switching between multiple endpoints +func TestRedirectAs_Multiple_Switches(t *testing.T) { + wgPort := 51856 + ebpfProxy := ebpf.NewWGEBPFProxy(wgPort, 1280) + if err := ebpfProxy.Listen(); err != nil { + t.Fatalf("failed to initialize ebpf proxy: %v", err) + } + defer func() { + if err := ebpfProxy.Free(); err != nil { + t.Errorf("failed to free ebpf proxy: %v", err) + } + }() + + proxy := ebpf.NewProxyWrapper(ebpfProxy) + + ctx := context.Background() + + // Create WireGuard listener + wgListener, err := net.ListenUDP("udp4", &net.UDPAddr{ + IP: net.ParseIP("127.0.0.1"), + Port: wgPort, + }) + if err != nil { + t.Fatalf("failed to create WireGuard listener: %v", err) + } + defer wgListener.Close() + + // Create relay server and connection + relayServer, err := net.ListenUDP("udp", &net.UDPAddr{ + IP: net.ParseIP("127.0.0.1"), + Port: 0, + }) + if err != nil { + t.Fatalf("failed to create relay server: %v", err) + } + defer relayServer.Close() + + relayConn, err := net.Dial("udp", relayServer.LocalAddr().String()) + if err != nil { + t.Fatalf("failed to create relay connection: %v", err) + } + defer relayConn.Close() + + nbAddr := &net.UDPAddr{ + IP: net.ParseIP("100.108.111.177"), + Port: 38746, + } + + if err := proxy.AddTurnConn(ctx, nbAddr, relayConn); err != nil { + t.Fatalf("failed to add TURN connection: %v", err) + } + defer func() { + if err := proxy.CloseConn(); err != nil { + t.Errorf("failed to close proxy connection: %v", err) + } + }() + + proxy.Work() + + // Test switching between multiple endpoints - using addresses in local subnet + endpoints := []*net.UDPAddr{ + {IP: net.ParseIP("192.168.0.100"), Port: 51820}, + {IP: net.ParseIP("192.168.0.101"), Port: 51821}, + {IP: net.ParseIP("192.168.0.102"), Port: 51822}, + } + + for i, endpoint := range endpoints { + proxy.RedirectAs(endpoint) + time.Sleep(100 * time.Millisecond) + + msg := []byte("test message") + if _, err := relayServer.WriteTo(msg, relayConn.LocalAddr()); err != nil { + t.Fatalf("failed to write message for endpoint %d: %v", i, err) + } + + buf := make([]byte, 1024) + if err := wgListener.SetReadDeadline(time.Now().Add(2 * time.Second)); err != nil { + t.Fatalf("failed to set read deadline: %v", err) + } + + n, srcAddr, err := wgListener.ReadFrom(buf) + if err != nil { + t.Fatalf("failed to read message for endpoint %d: %v", i, err) + } + + if string(buf[:n]) != string(msg) { + t.Errorf("endpoint %d: expected message %q, got %q", i, msg, buf[:n]) + } + + if !compareUDPAddr(srcAddr, endpoint) { + t.Errorf("endpoint %d: expected source %s, got %s", + i, endpoint.String(), srcAddr.String()) + } + } +} diff --git a/client/iface/wgproxy/udp/proxy.go b/client/iface/wgproxy/udp/proxy.go index 4ef2f19c4..6069d1960 100644 --- a/client/iface/wgproxy/udp/proxy.go +++ b/client/iface/wgproxy/udp/proxy.go @@ -56,7 +56,7 @@ func NewWGUDPProxy(wgPort int, mtu uint16) *WGUDPProxy { // the connection is complete, an error is returned. Once successfully // connected, any expiration of the context will not affect the // connection. -func (p *WGUDPProxy) AddTurnConn(ctx context.Context, endpoint *net.UDPAddr, remoteConn net.Conn) error { +func (p *WGUDPProxy) AddTurnConn(ctx context.Context, _ *net.UDPAddr, remoteConn net.Conn) error { dialer := net.Dialer{} localConn, err := dialer.DialContext(ctx, "udp", fmt.Sprintf(":%d", p.localWGListenPort)) if err != nil { diff --git a/client/iface/wgproxy/udp/rawsocket.go b/client/iface/wgproxy/udp/rawsocket.go index fdc911463..cc099d9df 100644 --- a/client/iface/wgproxy/udp/rawsocket.go +++ b/client/iface/wgproxy/udp/rawsocket.go @@ -19,37 +19,56 @@ var ( FixLengths: true, } - localHostNetIPAddr = &net.IPAddr{ + localHostNetIPAddrV4 = &net.IPAddr{ IP: net.ParseIP("127.0.0.1"), } + localHostNetIPAddrV6 = &net.IPAddr{ + IP: net.ParseIP("::1"), + } ) type SrcFaker struct { srcAddr *net.UDPAddr - rawSocket net.PacketConn - ipH gopacket.SerializableLayer - udpH gopacket.SerializableLayer - layerBuffer gopacket.SerializeBuffer + rawSocket net.PacketConn + ipH gopacket.SerializableLayer + udpH gopacket.SerializableLayer + layerBuffer gopacket.SerializeBuffer + localHostAddr *net.IPAddr } func NewSrcFaker(dstPort int, srcAddr *net.UDPAddr) (*SrcFaker, error) { - rawSocket, err := rawsocket.PrepareSenderRawSocket() + // Create only the raw socket for the address family we need + var rawSocket net.PacketConn + var err error + var localHostAddr *net.IPAddr + + if srcAddr.IP.To4() != nil { + rawSocket, err = rawsocket.PrepareSenderRawSocketIPv4() + localHostAddr = localHostNetIPAddrV4 + } else { + rawSocket, err = rawsocket.PrepareSenderRawSocketIPv6() + localHostAddr = localHostNetIPAddrV6 + } if err != nil { return nil, err } ipH, udpH, err := prepareHeaders(dstPort, srcAddr) if err != nil { + if closeErr := rawSocket.Close(); closeErr != nil { + log.Warnf("failed to close raw socket: %v", closeErr) + } return nil, err } f := &SrcFaker{ - srcAddr: srcAddr, - rawSocket: rawSocket, - ipH: ipH, - udpH: udpH, - layerBuffer: gopacket.NewSerializeBuffer(), + srcAddr: srcAddr, + rawSocket: rawSocket, + ipH: ipH, + udpH: udpH, + layerBuffer: gopacket.NewSerializeBuffer(), + localHostAddr: localHostAddr, } return f, nil @@ -72,7 +91,7 @@ func (f *SrcFaker) SendPkg(data []byte) (int, error) { if err != nil { return 0, fmt.Errorf("serialize layers: %w", err) } - n, err := f.rawSocket.WriteTo(f.layerBuffer.Bytes(), localHostNetIPAddr) + n, err := f.rawSocket.WriteTo(f.layerBuffer.Bytes(), f.localHostAddr) if err != nil { return 0, fmt.Errorf("write to raw conn: %w", err) } @@ -80,19 +99,40 @@ func (f *SrcFaker) SendPkg(data []byte) (int, error) { } func prepareHeaders(dstPort int, srcAddr *net.UDPAddr) (gopacket.SerializableLayer, gopacket.SerializableLayer, error) { - ipH := &layers.IPv4{ - DstIP: net.ParseIP("127.0.0.1"), - SrcIP: srcAddr.IP, - Version: 4, - TTL: 64, - Protocol: layers.IPProtocolUDP, + var ipH gopacket.SerializableLayer + var networkLayer gopacket.NetworkLayer + + // Check if source IP is IPv4 or IPv6 + if srcAddr.IP.To4() != nil { + // IPv4 + ipv4 := &layers.IPv4{ + DstIP: localHostNetIPAddrV4.IP, + SrcIP: srcAddr.IP, + Version: 4, + TTL: 64, + Protocol: layers.IPProtocolUDP, + } + ipH = ipv4 + networkLayer = ipv4 + } else { + // IPv6 + ipv6 := &layers.IPv6{ + DstIP: localHostNetIPAddrV6.IP, + SrcIP: srcAddr.IP, + Version: 6, + HopLimit: 64, + NextHeader: layers.IPProtocolUDP, + } + ipH = ipv6 + networkLayer = ipv6 } + udpH := &layers.UDP{ SrcPort: layers.UDPPort(srcAddr.Port), DstPort: layers.UDPPort(dstPort), // dst is the localhost WireGuard port } - err := udpH.SetNetworkLayerForChecksum(ipH) + err := udpH.SetNetworkLayerForChecksum(networkLayer) if err != nil { return nil, nil, fmt.Errorf("set network layer for checksum: %w", err) } diff --git a/client/internal/auth/auth.go b/client/internal/auth/auth.go new file mode 100644 index 000000000..44e98bede --- /dev/null +++ b/client/internal/auth/auth.go @@ -0,0 +1,499 @@ +package auth + +import ( + "context" + "net/url" + "sync" + "time" + + "github.com/cenkalti/backoff/v4" + "github.com/google/uuid" + log "github.com/sirupsen/logrus" + "golang.zx2c4.com/wireguard/wgctrl/wgtypes" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/netbirdio/netbird/client/internal/profilemanager" + "github.com/netbirdio/netbird/client/ssh" + "github.com/netbirdio/netbird/client/system" + mgm "github.com/netbirdio/netbird/shared/management/client" + "github.com/netbirdio/netbird/shared/management/client/common" + mgmProto "github.com/netbirdio/netbird/shared/management/proto" +) + +// Auth manages authentication operations with the management server +// It maintains a long-lived connection and automatically handles reconnection with backoff +type Auth struct { + mutex sync.RWMutex + client *mgm.GrpcClient + config *profilemanager.Config + privateKey wgtypes.Key + mgmURL *url.URL + mgmTLSEnabled bool +} + +// NewAuth creates a new Auth instance that manages authentication flows +// It establishes a connection to the management server that will be reused for all operations +// The connection is automatically recreated with backoff if it becomes disconnected +func NewAuth(ctx context.Context, privateKey string, mgmURL *url.URL, config *profilemanager.Config) (*Auth, error) { + // Validate WireGuard private key + myPrivateKey, err := wgtypes.ParseKey(privateKey) + if err != nil { + return nil, err + } + + // Determine TLS setting based on URL scheme + mgmTLSEnabled := mgmURL.Scheme == "https" + + log.Debugf("connecting to Management Service %s", mgmURL.String()) + mgmClient, err := mgm.NewClient(ctx, mgmURL.Host, myPrivateKey, mgmTLSEnabled) + if err != nil { + log.Errorf("failed connecting to Management Service %s: %v", mgmURL.String(), err) + return nil, err + } + + log.Debugf("connected to the Management service %s", mgmURL.String()) + + return &Auth{ + client: mgmClient, + config: config, + privateKey: myPrivateKey, + mgmURL: mgmURL, + mgmTLSEnabled: mgmTLSEnabled, + }, nil +} + +// Close closes the management client connection +func (a *Auth) Close() error { + a.mutex.Lock() + defer a.mutex.Unlock() + + if a.client == nil { + return nil + } + return a.client.Close() +} + +// IsSSOSupported checks if the management server supports SSO by attempting to retrieve auth flow configurations. +// Returns true if either PKCE or Device authorization flow is supported, false otherwise. +// This function encapsulates the SSO detection logic to avoid exposing gRPC error codes to upper layers. +// Automatically retries with backoff and reconnection on connection errors. +func (a *Auth) IsSSOSupported(ctx context.Context) (bool, error) { + var supportsSSO bool + + err := a.withRetry(ctx, func(client *mgm.GrpcClient) error { + // Try PKCE flow first + _, err := a.getPKCEFlow(client) + if err == nil { + supportsSSO = true + return nil + } + + // Check if PKCE is not supported + if s, ok := status.FromError(err); ok && (s.Code() == codes.NotFound || s.Code() == codes.Unimplemented) { + // PKCE not supported, try Device flow + _, err = a.getDeviceFlow(client) + if err == nil { + supportsSSO = true + return nil + } + + // Check if Device flow is also not supported + if s, ok := status.FromError(err); ok && (s.Code() == codes.NotFound || s.Code() == codes.Unimplemented) { + // Neither PKCE nor Device flow is supported + supportsSSO = false + return nil + } + + // Device flow check returned an error other than NotFound/Unimplemented + return err + } + + // PKCE flow check returned an error other than NotFound/Unimplemented + return err + }) + + return supportsSSO, err +} + +// GetOAuthFlow returns an OAuth flow (PKCE or Device) using the existing management connection +// This avoids creating a new connection to the management server +func (a *Auth) GetOAuthFlow(ctx context.Context, forceDeviceAuth bool) (OAuthFlow, error) { + var flow OAuthFlow + var err error + + err = a.withRetry(ctx, func(client *mgm.GrpcClient) error { + if forceDeviceAuth { + flow, err = a.getDeviceFlow(client) + return err + } + + // Try PKCE flow first + flow, err = a.getPKCEFlow(client) + if err != nil { + // If PKCE not supported, try Device flow + if s, ok := status.FromError(err); ok && (s.Code() == codes.NotFound || s.Code() == codes.Unimplemented) { + flow, err = a.getDeviceFlow(client) + return err + } + return err + } + return nil + }) + + return flow, err +} + +// IsLoginRequired checks if login is required by attempting to authenticate with the server +// Automatically retries with backoff and reconnection on connection errors. +func (a *Auth) IsLoginRequired(ctx context.Context) (bool, error) { + pubSSHKey, err := ssh.GeneratePublicKey([]byte(a.config.SSHKey)) + if err != nil { + return false, err + } + + var needsLogin bool + + err = a.withRetry(ctx, func(client *mgm.GrpcClient) error { + _, _, err := a.doMgmLogin(client, ctx, pubSSHKey) + if isLoginNeeded(err) { + needsLogin = true + return nil + } + needsLogin = false + return err + }) + + return needsLogin, err +} + +// Login attempts to log in or register the client with the management server +// Returns error and a boolean indicating if it's an authentication error (permission denied) that should stop retries. +// Automatically retries with backoff and reconnection on connection errors. +func (a *Auth) Login(ctx context.Context, setupKey string, jwtToken string) (error, bool) { + pubSSHKey, err := ssh.GeneratePublicKey([]byte(a.config.SSHKey)) + if err != nil { + return err, false + } + + var isAuthError bool + + err = a.withRetry(ctx, func(client *mgm.GrpcClient) error { + serverKey, _, err := a.doMgmLogin(client, ctx, pubSSHKey) + if serverKey != nil && isRegistrationNeeded(err) { + log.Debugf("peer registration required") + _, err = a.registerPeer(client, ctx, setupKey, jwtToken, pubSSHKey) + if err != nil { + isAuthError = isPermissionDenied(err) + return err + } + } else if err != nil { + isAuthError = isPermissionDenied(err) + return err + } + + isAuthError = false + return nil + }) + + return err, isAuthError +} + +// getPKCEFlow retrieves PKCE authorization flow configuration and creates a flow instance +func (a *Auth) getPKCEFlow(client *mgm.GrpcClient) (*PKCEAuthorizationFlow, error) { + serverKey, err := client.GetServerPublicKey() + if err != nil { + log.Errorf("failed while getting Management Service public key: %v", err) + return nil, err + } + + protoFlow, err := client.GetPKCEAuthorizationFlow(*serverKey) + if err != nil { + if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { + log.Warnf("server couldn't find pkce flow, contact admin: %v", err) + return nil, err + } + log.Errorf("failed to retrieve pkce flow: %v", err) + return nil, err + } + + protoConfig := protoFlow.GetProviderConfig() + config := &PKCEAuthProviderConfig{ + Audience: protoConfig.GetAudience(), + ClientID: protoConfig.GetClientID(), + ClientSecret: protoConfig.GetClientSecret(), + TokenEndpoint: protoConfig.GetTokenEndpoint(), + AuthorizationEndpoint: protoConfig.GetAuthorizationEndpoint(), + Scope: protoConfig.GetScope(), + RedirectURLs: protoConfig.GetRedirectURLs(), + UseIDToken: protoConfig.GetUseIDToken(), + ClientCertPair: a.config.ClientCertKeyPair, + DisablePromptLogin: protoConfig.GetDisablePromptLogin(), + LoginFlag: common.LoginFlag(protoConfig.GetLoginFlag()), + } + + if err := validatePKCEConfig(config); err != nil { + return nil, err + } + + flow, err := NewPKCEAuthorizationFlow(*config) + if err != nil { + return nil, err + } + + return flow, nil +} + +// getDeviceFlow retrieves device authorization flow configuration and creates a flow instance +func (a *Auth) getDeviceFlow(client *mgm.GrpcClient) (*DeviceAuthorizationFlow, error) { + serverKey, err := client.GetServerPublicKey() + if err != nil { + log.Errorf("failed while getting Management Service public key: %v", err) + return nil, err + } + + protoFlow, err := client.GetDeviceAuthorizationFlow(*serverKey) + if err != nil { + if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { + log.Warnf("server couldn't find device flow, contact admin: %v", err) + return nil, err + } + log.Errorf("failed to retrieve device flow: %v", err) + return nil, err + } + + protoConfig := protoFlow.GetProviderConfig() + config := &DeviceAuthProviderConfig{ + Audience: protoConfig.GetAudience(), + ClientID: protoConfig.GetClientID(), + ClientSecret: protoConfig.GetClientSecret(), + Domain: protoConfig.Domain, + TokenEndpoint: protoConfig.GetTokenEndpoint(), + DeviceAuthEndpoint: protoConfig.GetDeviceAuthEndpoint(), + Scope: protoConfig.GetScope(), + UseIDToken: protoConfig.GetUseIDToken(), + } + + // Keep compatibility with older management versions + if config.Scope == "" { + config.Scope = "openid" + } + + if err := validateDeviceAuthConfig(config); err != nil { + return nil, err + } + + flow, err := NewDeviceAuthorizationFlow(*config) + if err != nil { + return nil, err + } + + return flow, nil +} + +// doMgmLogin performs the actual login operation with the management service +func (a *Auth) doMgmLogin(client *mgm.GrpcClient, ctx context.Context, pubSSHKey []byte) (*wgtypes.Key, *mgmProto.LoginResponse, error) { + serverKey, err := client.GetServerPublicKey() + if err != nil { + log.Errorf("failed while getting Management Service public key: %v", err) + return nil, nil, err + } + + sysInfo := system.GetInfo(ctx) + a.setSystemInfoFlags(sysInfo) + loginResp, err := client.Login(*serverKey, sysInfo, pubSSHKey, a.config.DNSLabels) + return serverKey, loginResp, err +} + +// registerPeer checks whether setupKey was provided via cmd line and if not then it prompts user to enter a key. +// Otherwise tries to register with the provided setupKey via command line. +func (a *Auth) registerPeer(client *mgm.GrpcClient, ctx context.Context, setupKey string, jwtToken string, pubSSHKey []byte) (*mgmProto.LoginResponse, error) { + serverPublicKey, err := client.GetServerPublicKey() + if err != nil { + log.Errorf("failed while getting Management Service public key: %v", err) + return nil, err + } + + validSetupKey, err := uuid.Parse(setupKey) + if err != nil && jwtToken == "" { + return nil, status.Errorf(codes.InvalidArgument, "invalid setup-key or no sso information provided, err: %v", err) + } + + log.Debugf("sending peer registration request to Management Service") + info := system.GetInfo(ctx) + a.setSystemInfoFlags(info) + loginResp, err := client.Register(*serverPublicKey, validSetupKey.String(), jwtToken, info, pubSSHKey, a.config.DNSLabels) + if err != nil { + log.Errorf("failed registering peer %v", err) + return nil, err + } + + log.Infof("peer has been successfully registered on Management Service") + + return loginResp, nil +} + +// setSystemInfoFlags sets all configuration flags on the provided system info +func (a *Auth) setSystemInfoFlags(info *system.Info) { + info.SetFlags( + a.config.RosenpassEnabled, + a.config.RosenpassPermissive, + a.config.ServerSSHAllowed, + a.config.DisableClientRoutes, + a.config.DisableServerRoutes, + a.config.DisableDNS, + a.config.DisableFirewall, + a.config.BlockLANAccess, + a.config.BlockInbound, + a.config.LazyConnectionEnabled, + a.config.EnableSSHRoot, + a.config.EnableSSHSFTP, + a.config.EnableSSHLocalPortForwarding, + a.config.EnableSSHRemotePortForwarding, + a.config.DisableSSHAuth, + ) +} + +// reconnect closes the current connection and creates a new one +// It checks if the brokenClient is still the current client before reconnecting +// to avoid multiple threads reconnecting unnecessarily +func (a *Auth) reconnect(ctx context.Context, brokenClient *mgm.GrpcClient) error { + a.mutex.Lock() + defer a.mutex.Unlock() + + // Double-check: if client has already been replaced by another thread, skip reconnection + if a.client != brokenClient { + log.Debugf("client already reconnected by another thread, skipping") + return nil + } + + // Create new connection FIRST, before closing the old one + // This ensures a.client is never nil, preventing panics in other threads + log.Debugf("reconnecting to Management Service %s", a.mgmURL.String()) + mgmClient, err := mgm.NewClient(ctx, a.mgmURL.Host, a.privateKey, a.mgmTLSEnabled) + if err != nil { + log.Errorf("failed reconnecting to Management Service %s: %v", a.mgmURL.String(), err) + // Keep the old client if reconnection fails + return err + } + + // Close old connection AFTER new one is successfully created + oldClient := a.client + a.client = mgmClient + + if oldClient != nil { + if err := oldClient.Close(); err != nil { + log.Debugf("error closing old connection: %v", err) + } + } + + log.Debugf("successfully reconnected to Management service %s", a.mgmURL.String()) + return nil +} + +// isConnectionError checks if the error is a connection-related error that should trigger reconnection +func isConnectionError(err error) bool { + if err == nil { + return false + } + s, ok := status.FromError(err) + if !ok { + return false + } + // These error codes indicate connection issues + return s.Code() == codes.Unavailable || + s.Code() == codes.DeadlineExceeded || + s.Code() == codes.Canceled || + s.Code() == codes.Internal +} + +// withRetry wraps an operation with exponential backoff retry logic +// It automatically reconnects on connection errors +func (a *Auth) withRetry(ctx context.Context, operation func(client *mgm.GrpcClient) error) error { + backoffSettings := &backoff.ExponentialBackOff{ + InitialInterval: 500 * time.Millisecond, + RandomizationFactor: 0.5, + Multiplier: 1.5, + MaxInterval: 10 * time.Second, + MaxElapsedTime: 2 * time.Minute, + Stop: backoff.Stop, + Clock: backoff.SystemClock, + } + backoffSettings.Reset() + + return backoff.RetryNotify( + func() error { + // Capture the client BEFORE the operation to ensure we track the correct client + a.mutex.RLock() + currentClient := a.client + a.mutex.RUnlock() + + if currentClient == nil { + return status.Errorf(codes.Unavailable, "client is not initialized") + } + + // Execute operation with the captured client + err := operation(currentClient) + if err == nil { + return nil + } + + // If it's a connection error, attempt reconnection using the client that was actually used + if isConnectionError(err) { + log.Warnf("connection error detected, attempting reconnection: %v", err) + + if reconnectErr := a.reconnect(ctx, currentClient); reconnectErr != nil { + log.Errorf("reconnection failed: %v", reconnectErr) + return reconnectErr + } + // Return the original error to trigger retry with the new connection + return err + } + + // For authentication errors, don't retry + if isAuthenticationError(err) { + return backoff.Permanent(err) + } + + return err + }, + backoff.WithContext(backoffSettings, ctx), + func(err error, duration time.Duration) { + log.Warnf("operation failed, retrying in %v: %v", duration, err) + }, + ) +} + +// isAuthenticationError checks if the error is an authentication-related error that should not be retried. +// Returns true if the error is InvalidArgument or PermissionDenied, indicating that retrying won't help. +func isAuthenticationError(err error) bool { + if err == nil { + return false + } + s, ok := status.FromError(err) + if !ok { + return false + } + return s.Code() == codes.InvalidArgument || s.Code() == codes.PermissionDenied +} + +// isPermissionDenied checks if the error is a PermissionDenied error. +// This is used to determine if early exit from backoff is needed (e.g., when the server responded but denied access). +func isPermissionDenied(err error) bool { + if err == nil { + return false + } + s, ok := status.FromError(err) + if !ok { + return false + } + return s.Code() == codes.PermissionDenied +} + +func isLoginNeeded(err error) bool { + return isAuthenticationError(err) +} + +func isRegistrationNeeded(err error) bool { + return isPermissionDenied(err) +} diff --git a/client/internal/auth/device_flow.go b/client/internal/auth/device_flow.go index 8ca760742..e33765300 100644 --- a/client/internal/auth/device_flow.go +++ b/client/internal/auth/device_flow.go @@ -15,7 +15,6 @@ import ( log "github.com/sirupsen/logrus" - "github.com/netbirdio/netbird/client/internal" "github.com/netbirdio/netbird/util/embeddedroots" ) @@ -26,12 +25,56 @@ const ( var _ OAuthFlow = &DeviceAuthorizationFlow{} +// DeviceAuthProviderConfig has all attributes needed to initiate a device authorization flow +type DeviceAuthProviderConfig struct { + // ClientID An IDP application client id + ClientID string + // ClientSecret An IDP application client secret + ClientSecret string + // Domain An IDP API domain + // Deprecated. Use OIDCConfigEndpoint instead + Domain string + // Audience An Audience for to authorization validation + Audience string + // TokenEndpoint is the endpoint of an IDP manager where clients can obtain access token + TokenEndpoint string + // DeviceAuthEndpoint is the endpoint of an IDP manager where clients can obtain device authorization code + DeviceAuthEndpoint string + // Scopes provides the scopes to be included in the token request + Scope string + // UseIDToken indicates if the id token should be used for authentication + UseIDToken bool + // LoginHint is used to pre-fill the email/username field during authentication + LoginHint string +} + +// validateDeviceAuthConfig validates device authorization provider configuration +func validateDeviceAuthConfig(config *DeviceAuthProviderConfig) error { + errorMsgFormat := "invalid provider configuration received from management: %s value is empty. Contact your NetBird administrator" + + if config.Audience == "" { + return fmt.Errorf(errorMsgFormat, "Audience") + } + if config.ClientID == "" { + return fmt.Errorf(errorMsgFormat, "Client ID") + } + if config.TokenEndpoint == "" { + return fmt.Errorf(errorMsgFormat, "Token Endpoint") + } + if config.DeviceAuthEndpoint == "" { + return fmt.Errorf(errorMsgFormat, "Device Auth Endpoint") + } + if config.Scope == "" { + return fmt.Errorf(errorMsgFormat, "Device Auth Scopes") + } + return nil +} + // DeviceAuthorizationFlow implements the OAuthFlow interface, // for the Device Authorization Flow. type DeviceAuthorizationFlow struct { - providerConfig internal.DeviceAuthProviderConfig - - HTTPClient HTTPClient + providerConfig DeviceAuthProviderConfig + HTTPClient HTTPClient } // RequestDeviceCodePayload used for request device code payload for auth0 @@ -57,7 +100,7 @@ type TokenRequestResponse struct { } // NewDeviceAuthorizationFlow returns device authorization flow client -func NewDeviceAuthorizationFlow(config internal.DeviceAuthProviderConfig) (*DeviceAuthorizationFlow, error) { +func NewDeviceAuthorizationFlow(config DeviceAuthProviderConfig) (*DeviceAuthorizationFlow, error) { httpTransport := http.DefaultTransport.(*http.Transport).Clone() httpTransport.MaxIdleConns = 5 @@ -89,6 +132,11 @@ func (d *DeviceAuthorizationFlow) GetClientID(ctx context.Context) string { return d.providerConfig.ClientID } +// SetLoginHint sets the login hint for the device authorization flow +func (d *DeviceAuthorizationFlow) SetLoginHint(hint string) { + d.providerConfig.LoginHint = hint +} + // RequestAuthInfo requests a device code login flow information from Hosted func (d *DeviceAuthorizationFlow) RequestAuthInfo(ctx context.Context) (AuthFlowInfo, error) { form := url.Values{} @@ -199,14 +247,22 @@ func (d *DeviceAuthorizationFlow) requestToken(info AuthFlowInfo) (TokenRequestR } // WaitToken waits user's login and authorize the app. Once the user's authorize -// it retrieves the access token from Hosted's endpoint and validates it before returning +// it retrieves the access token from Hosted's endpoint and validates it before returning. +// The method creates a timeout context internally based on info.ExpiresIn. func (d *DeviceAuthorizationFlow) WaitToken(ctx context.Context, info AuthFlowInfo) (TokenInfo, error) { + // Create timeout context based on flow expiration + timeout := time.Duration(info.ExpiresIn) * time.Second + waitCtx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + interval := time.Duration(info.Interval) * time.Second ticker := time.NewTicker(interval) + defer ticker.Stop() + for { select { - case <-ctx.Done(): - return TokenInfo{}, ctx.Err() + case <-waitCtx.Done(): + return TokenInfo{}, waitCtx.Err() case <-ticker.C: tokenResponse, err := d.requestToken(info) diff --git a/client/internal/auth/device_flow_test.go b/client/internal/auth/device_flow_test.go index 466645ee9..6a433cb61 100644 --- a/client/internal/auth/device_flow_test.go +++ b/client/internal/auth/device_flow_test.go @@ -12,8 +12,6 @@ import ( "github.com/golang-jwt/jwt/v5" "github.com/stretchr/testify/require" - - "github.com/netbirdio/netbird/client/internal" ) type mockHTTPClient struct { @@ -115,18 +113,19 @@ func TestHosted_RequestDeviceCode(t *testing.T) { err: testCase.inputReqError, } - deviceFlow := &DeviceAuthorizationFlow{ - providerConfig: internal.DeviceAuthProviderConfig{ - Audience: expectedAudience, - ClientID: expectedClientID, - Scope: expectedScope, - TokenEndpoint: "test.hosted.com/token", - DeviceAuthEndpoint: "test.hosted.com/device/auth", - UseIDToken: false, - }, - HTTPClient: &httpClient, + config := DeviceAuthProviderConfig{ + Audience: expectedAudience, + ClientID: expectedClientID, + Scope: expectedScope, + TokenEndpoint: "test.hosted.com/token", + DeviceAuthEndpoint: "test.hosted.com/device/auth", + UseIDToken: false, } + deviceFlow, err := NewDeviceAuthorizationFlow(config) + require.NoError(t, err, "creating device flow should not fail") + deviceFlow.HTTPClient = &httpClient + authInfo, err := deviceFlow.RequestAuthInfo(context.TODO()) testCase.testingErrFunc(t, err, testCase.expectedErrorMSG) @@ -280,18 +279,19 @@ func TestHosted_WaitToken(t *testing.T) { countResBody: testCase.inputCountResBody, } - deviceFlow := DeviceAuthorizationFlow{ - providerConfig: internal.DeviceAuthProviderConfig{ - Audience: testCase.inputAudience, - ClientID: clientID, - TokenEndpoint: "test.hosted.com/token", - DeviceAuthEndpoint: "test.hosted.com/device/auth", - Scope: "openid", - UseIDToken: false, - }, - HTTPClient: &httpClient, + config := DeviceAuthProviderConfig{ + Audience: testCase.inputAudience, + ClientID: clientID, + TokenEndpoint: "test.hosted.com/token", + DeviceAuthEndpoint: "test.hosted.com/device/auth", + Scope: "openid", + UseIDToken: false, } + deviceFlow, err := NewDeviceAuthorizationFlow(config) + require.NoError(t, err, "creating device flow should not fail") + deviceFlow.HTTPClient = &httpClient + ctx, cancel := context.WithTimeout(context.TODO(), testCase.inputTimeout) defer cancel() tokenInfo, err := deviceFlow.WaitToken(ctx, testCase.inputInfo) diff --git a/client/internal/auth/oauth.go b/client/internal/auth/oauth.go index 85a166005..a50a2ce6f 100644 --- a/client/internal/auth/oauth.go +++ b/client/internal/auth/oauth.go @@ -10,7 +10,6 @@ import ( "google.golang.org/grpc/codes" gstatus "google.golang.org/grpc/status" - "github.com/netbirdio/netbird/client/internal" "github.com/netbirdio/netbird/client/internal/profilemanager" ) @@ -87,19 +86,33 @@ func NewOAuthFlow(ctx context.Context, config *profilemanager.Config, isUnixDesk // authenticateWithPKCEFlow initializes the Proof Key for Code Exchange flow auth flow func authenticateWithPKCEFlow(ctx context.Context, config *profilemanager.Config, hint string) (OAuthFlow, error) { - pkceFlowInfo, err := internal.GetPKCEAuthorizationFlowInfo(ctx, config.PrivateKey, config.ManagementURL, config.ClientCertKeyPair) + authClient, err := NewAuth(ctx, config.PrivateKey, config.ManagementURL, config) + if err != nil { + return nil, fmt.Errorf("failed to create auth client: %v", err) + } + defer authClient.Close() + + pkceFlowInfo, err := authClient.getPKCEFlow(authClient.client) if err != nil { return nil, fmt.Errorf("getting pkce authorization flow info failed with error: %v", err) } - pkceFlowInfo.ProviderConfig.LoginHint = hint + if hint != "" { + pkceFlowInfo.SetLoginHint(hint) + } - return NewPKCEAuthorizationFlow(pkceFlowInfo.ProviderConfig) + return pkceFlowInfo, nil } // authenticateWithDeviceCodeFlow initializes the Device Code auth Flow func authenticateWithDeviceCodeFlow(ctx context.Context, config *profilemanager.Config, hint string) (OAuthFlow, error) { - deviceFlowInfo, err := internal.GetDeviceAuthorizationFlowInfo(ctx, config.PrivateKey, config.ManagementURL) + authClient, err := NewAuth(ctx, config.PrivateKey, config.ManagementURL, config) + if err != nil { + return nil, fmt.Errorf("failed to create auth client: %v", err) + } + defer authClient.Close() + + deviceFlowInfo, err := authClient.getDeviceFlow(authClient.client) if err != nil { switch s, ok := gstatus.FromError(err); { case ok && s.Code() == codes.NotFound: @@ -114,7 +127,9 @@ func authenticateWithDeviceCodeFlow(ctx context.Context, config *profilemanager. } } - deviceFlowInfo.ProviderConfig.LoginHint = hint + if hint != "" { + deviceFlowInfo.SetLoginHint(hint) + } - return NewDeviceAuthorizationFlow(deviceFlowInfo.ProviderConfig) + return deviceFlowInfo, nil } diff --git a/client/internal/auth/pkce_flow.go b/client/internal/auth/pkce_flow.go index cc43c8648..2e16836d8 100644 --- a/client/internal/auth/pkce_flow.go +++ b/client/internal/auth/pkce_flow.go @@ -20,7 +20,6 @@ import ( log "github.com/sirupsen/logrus" "golang.org/x/oauth2" - "github.com/netbirdio/netbird/client/internal" "github.com/netbirdio/netbird/client/internal/templates" "github.com/netbirdio/netbird/shared/management/client/common" ) @@ -35,17 +34,67 @@ const ( defaultPKCETimeoutSeconds = 300 ) +// PKCEAuthProviderConfig has all attributes needed to initiate PKCE authorization flow +type PKCEAuthProviderConfig struct { + // ClientID An IDP application client id + ClientID string + // ClientSecret An IDP application client secret + ClientSecret string + // Audience An Audience for to authorization validation + Audience string + // TokenEndpoint is the endpoint of an IDP manager where clients can obtain access token + TokenEndpoint string + // AuthorizationEndpoint is the endpoint of an IDP manager where clients can obtain authorization code + AuthorizationEndpoint string + // Scopes provides the scopes to be included in the token request + Scope string + // RedirectURL handles authorization code from IDP manager + RedirectURLs []string + // UseIDToken indicates if the id token should be used for authentication + UseIDToken bool + // ClientCertPair is used for mTLS authentication to the IDP + ClientCertPair *tls.Certificate + // DisablePromptLogin makes the PKCE flow to not prompt the user for login + DisablePromptLogin bool + // LoginFlag is used to configure the PKCE flow login behavior + LoginFlag common.LoginFlag + // LoginHint is used to pre-fill the email/username field during authentication + LoginHint string +} + +// validatePKCEConfig validates PKCE provider configuration +func validatePKCEConfig(config *PKCEAuthProviderConfig) error { + errorMsgFormat := "invalid provider configuration received from management: %s value is empty. Contact your NetBird administrator" + + if config.ClientID == "" { + return fmt.Errorf(errorMsgFormat, "Client ID") + } + if config.TokenEndpoint == "" { + return fmt.Errorf(errorMsgFormat, "Token Endpoint") + } + if config.AuthorizationEndpoint == "" { + return fmt.Errorf(errorMsgFormat, "Authorization Auth Endpoint") + } + if config.Scope == "" { + return fmt.Errorf(errorMsgFormat, "PKCE Auth Scopes") + } + if config.RedirectURLs == nil { + return fmt.Errorf(errorMsgFormat, "PKCE Redirect URLs") + } + return nil +} + // PKCEAuthorizationFlow implements the OAuthFlow interface for // the Authorization Code Flow with PKCE. type PKCEAuthorizationFlow struct { - providerConfig internal.PKCEAuthProviderConfig + providerConfig PKCEAuthProviderConfig state string codeVerifier string oAuthConfig *oauth2.Config } // NewPKCEAuthorizationFlow returns new PKCE authorization code flow. -func NewPKCEAuthorizationFlow(config internal.PKCEAuthProviderConfig) (*PKCEAuthorizationFlow, error) { +func NewPKCEAuthorizationFlow(config PKCEAuthProviderConfig) (*PKCEAuthorizationFlow, error) { var availableRedirectURL string excludedRanges := getSystemExcludedPortRanges() @@ -124,10 +173,21 @@ func (p *PKCEAuthorizationFlow) RequestAuthInfo(ctx context.Context) (AuthFlowIn }, nil } +// SetLoginHint sets the login hint for the PKCE authorization flow +func (p *PKCEAuthorizationFlow) SetLoginHint(hint string) { + p.providerConfig.LoginHint = hint +} + // WaitToken waits for the OAuth token in the PKCE Authorization Flow. // It starts an HTTP server to receive the OAuth token callback and waits for the token or an error. // Once the token is received, it is converted to TokenInfo and validated before returning. -func (p *PKCEAuthorizationFlow) WaitToken(ctx context.Context, _ AuthFlowInfo) (TokenInfo, error) { +// The method creates a timeout context internally based on info.ExpiresIn. +func (p *PKCEAuthorizationFlow) WaitToken(ctx context.Context, info AuthFlowInfo) (TokenInfo, error) { + // Create timeout context based on flow expiration + timeout := time.Duration(info.ExpiresIn) * time.Second + waitCtx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + tokenChan := make(chan *oauth2.Token, 1) errChan := make(chan error, 1) @@ -138,7 +198,7 @@ func (p *PKCEAuthorizationFlow) WaitToken(ctx context.Context, _ AuthFlowInfo) ( server := &http.Server{Addr: fmt.Sprintf(":%s", parsedURL.Port())} defer func() { - shutdownCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() if err := server.Shutdown(shutdownCtx); err != nil { @@ -149,8 +209,8 @@ func (p *PKCEAuthorizationFlow) WaitToken(ctx context.Context, _ AuthFlowInfo) ( go p.startServer(server, tokenChan, errChan) select { - case <-ctx.Done(): - return TokenInfo{}, ctx.Err() + case <-waitCtx.Done(): + return TokenInfo{}, waitCtx.Err() case token := <-tokenChan: return p.parseOAuthToken(token) case err := <-errChan: diff --git a/client/internal/auth/pkce_flow_test.go b/client/internal/auth/pkce_flow_test.go index b77a17eaa..c487c13df 100644 --- a/client/internal/auth/pkce_flow_test.go +++ b/client/internal/auth/pkce_flow_test.go @@ -9,7 +9,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/netbirdio/netbird/client/internal" mgm "github.com/netbirdio/netbird/shared/management/client/common" ) @@ -50,7 +49,7 @@ func TestPromptLogin(t *testing.T) { for _, tc := range tt { t.Run(tc.name, func(t *testing.T) { - config := internal.PKCEAuthProviderConfig{ + config := PKCEAuthProviderConfig{ ClientID: "test-client-id", Audience: "test-audience", TokenEndpoint: "https://test-token-endpoint.com/token", diff --git a/client/internal/auth/pkce_flow_windows_test.go b/client/internal/auth/pkce_flow_windows_test.go index dd455b2fe..125eb270a 100644 --- a/client/internal/auth/pkce_flow_windows_test.go +++ b/client/internal/auth/pkce_flow_windows_test.go @@ -9,8 +9,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - - "github.com/netbirdio/netbird/client/internal" ) func TestParseExcludedPortRanges(t *testing.T) { @@ -95,7 +93,7 @@ func TestNewPKCEAuthorizationFlow_WithActualExcludedPorts(t *testing.T) { availablePort := 65432 - config := internal.PKCEAuthProviderConfig{ + config := PKCEAuthProviderConfig{ ClientID: "test-client-id", Audience: "test-audience", TokenEndpoint: "https://test-token-endpoint.com/token", diff --git a/client/internal/connect.go b/client/internal/connect.go index 65637c073..7fc3c9a96 100644 --- a/client/internal/connect.go +++ b/client/internal/connect.go @@ -59,7 +59,6 @@ func NewConnectClient( config *profilemanager.Config, statusRecorder *peer.Status, doInitalAutoUpdate bool, - ) *ConnectClient { return &ConnectClient{ ctx: ctx, @@ -71,8 +70,8 @@ func NewConnectClient( } // Run with main logic. -func (c *ConnectClient) Run(runningChan chan struct{}) error { - return c.run(MobileDependency{}, runningChan) +func (c *ConnectClient) Run(runningChan chan struct{}, logPath string) error { + return c.run(MobileDependency{}, runningChan, logPath) } // RunOnAndroid with main logic on mobile system @@ -93,7 +92,7 @@ func (c *ConnectClient) RunOnAndroid( DnsReadyListener: dnsReadyListener, StateFilePath: stateFilePath, } - return c.run(mobileDependency, nil) + return c.run(mobileDependency, nil, "") } func (c *ConnectClient) RunOniOS( @@ -111,10 +110,10 @@ func (c *ConnectClient) RunOniOS( DnsManager: dnsManager, StateFilePath: stateFilePath, } - return c.run(mobileDependency, nil) + return c.run(mobileDependency, nil, "") } -func (c *ConnectClient) run(mobileDependency MobileDependency, runningChan chan struct{}) error { +func (c *ConnectClient) run(mobileDependency MobileDependency, runningChan chan struct{}, logPath string) error { defer func() { if r := recover(); r != nil { rec := c.statusRecorder @@ -284,7 +283,7 @@ func (c *ConnectClient) run(mobileDependency MobileDependency, runningChan chan relayURLs, token := parseRelayInfo(loginResp) peerConfig := loginResp.GetPeerConfig() - engineConfig, err := createEngineConfig(myPrivateKey, c.config, peerConfig) + engineConfig, err := createEngineConfig(myPrivateKey, c.config, peerConfig, logPath) if err != nil { log.Error(err) return wrapErr(err) @@ -472,7 +471,7 @@ func (c *ConnectClient) SetSyncResponsePersistence(enabled bool) { } // createEngineConfig converts configuration received from Management Service to EngineConfig -func createEngineConfig(key wgtypes.Key, config *profilemanager.Config, peerConfig *mgmProto.PeerConfig) (*EngineConfig, error) { +func createEngineConfig(key wgtypes.Key, config *profilemanager.Config, peerConfig *mgmProto.PeerConfig, logPath string) (*EngineConfig, error) { nm := false if config.NetworkMonitor != nil { nm = *config.NetworkMonitor @@ -507,7 +506,10 @@ func createEngineConfig(key wgtypes.Key, config *profilemanager.Config, peerConf LazyConnectionEnabled: config.LazyConnectionEnabled, - MTU: selectMTU(config.MTU, peerConfig.Mtu), + MTU: selectMTU(config.MTU, peerConfig.Mtu), + LogPath: logPath, + + ProfileConfig: config, } if config.PreSharedKey != "" { diff --git a/client/internal/debug/debug.go b/client/internal/debug/debug.go index 01a0377a5..0f8243e7a 100644 --- a/client/internal/debug/debug.go +++ b/client/internal/debug/debug.go @@ -28,8 +28,10 @@ import ( "github.com/netbirdio/netbird/client/internal/peer" "github.com/netbirdio/netbird/client/internal/profilemanager" "github.com/netbirdio/netbird/client/internal/updatemanager/installer" + nbstatus "github.com/netbirdio/netbird/client/status" mgmProto "github.com/netbirdio/netbird/shared/management/proto" "github.com/netbirdio/netbird/util" + "github.com/netbirdio/netbird/version" ) const readmeContent = `Netbird debug bundle @@ -57,6 +59,7 @@ block.prof: Block profiling information. heap.prof: Heap profiling information (snapshot of memory allocations). allocs.prof: Allocations profiling information. threadcreate.prof: Thread creation profiling information. +cpu.prof: CPU profiling information. stack_trace.txt: Complete stack traces of all goroutines at the time of bundle creation. @@ -223,10 +226,11 @@ type BundleGenerator struct { internalConfig *profilemanager.Config statusRecorder *peer.Status syncResponse *mgmProto.SyncResponse - logFile string + logPath string + cpuProfile []byte + refreshStatus func() // Optional callback to refresh status before bundle generation anonymize bool - clientStatus string includeSystemInfo bool logFileCount uint32 @@ -235,7 +239,6 @@ type BundleGenerator struct { type BundleConfig struct { Anonymize bool - ClientStatus string IncludeSystemInfo bool LogFileCount uint32 } @@ -244,7 +247,9 @@ type GeneratorDependencies struct { InternalConfig *profilemanager.Config StatusRecorder *peer.Status SyncResponse *mgmProto.SyncResponse - LogFile string + LogPath string + CPUProfile []byte + RefreshStatus func() // Optional callback to refresh status before bundle generation } func NewBundleGenerator(deps GeneratorDependencies, cfg BundleConfig) *BundleGenerator { @@ -260,10 +265,11 @@ func NewBundleGenerator(deps GeneratorDependencies, cfg BundleConfig) *BundleGen internalConfig: deps.InternalConfig, statusRecorder: deps.StatusRecorder, syncResponse: deps.SyncResponse, - logFile: deps.LogFile, + logPath: deps.LogPath, + cpuProfile: deps.CPUProfile, + refreshStatus: deps.RefreshStatus, anonymize: cfg.Anonymize, - clientStatus: cfg.ClientStatus, includeSystemInfo: cfg.IncludeSystemInfo, logFileCount: logFileCount, } @@ -309,13 +315,6 @@ func (g *BundleGenerator) createArchive() error { return fmt.Errorf("add status: %w", err) } - if g.statusRecorder != nil { - status := g.statusRecorder.GetFullStatus() - seedFromStatus(g.anonymizer, &status) - } else { - log.Debugf("no status recorder available for seeding") - } - if err := g.addConfig(); err != nil { log.Errorf("failed to add config to debug bundle: %v", err) } @@ -332,6 +331,10 @@ func (g *BundleGenerator) createArchive() error { log.Errorf("failed to add profiles to debug bundle: %v", err) } + if err := g.addCPUProfile(); err != nil { + log.Errorf("failed to add CPU profile to debug bundle: %v", err) + } + if err := g.addStackTrace(); err != nil { log.Errorf("failed to add stack trace to debug bundle: %v", err) } @@ -352,7 +355,7 @@ func (g *BundleGenerator) createArchive() error { log.Errorf("failed to add wg show output: %v", err) } - if g.logFile != "" && !slices.Contains(util.SpecialLogs, g.logFile) { + if g.logPath != "" && !slices.Contains(util.SpecialLogs, g.logPath) { if err := g.addLogfile(); err != nil { log.Errorf("failed to add log file to debug bundle: %v", err) if err := g.trySystemdLogFallback(); err != nil { @@ -401,11 +404,30 @@ func (g *BundleGenerator) addReadme() error { } func (g *BundleGenerator) addStatus() error { - if status := g.clientStatus; status != "" { - statusReader := strings.NewReader(status) + if g.statusRecorder != nil { + pm := profilemanager.NewProfileManager() + var profName string + if activeProf, err := pm.GetActiveProfile(); err == nil { + profName = activeProf.Name + } + + if g.refreshStatus != nil { + g.refreshStatus() + } + + fullStatus := g.statusRecorder.GetFullStatus() + protoFullStatus := nbstatus.ToProtoFullStatus(fullStatus) + protoFullStatus.Events = g.statusRecorder.GetEventHistory() + overview := nbstatus.ConvertToStatusOutputOverview(protoFullStatus, g.anonymize, version.NetbirdVersion(), "", nil, nil, nil, "", profName) + statusOutput := overview.FullDetailSummary() + + statusReader := strings.NewReader(statusOutput) if err := g.addFileToZip(statusReader, "status.txt"); err != nil { return fmt.Errorf("add status file to zip: %w", err) } + seedFromStatus(g.anonymizer, &fullStatus) + } else { + log.Debugf("no status recorder available for seeding") } return nil } @@ -535,6 +557,19 @@ func (g *BundleGenerator) addProf() (err error) { return nil } +func (g *BundleGenerator) addCPUProfile() error { + if len(g.cpuProfile) == 0 { + return nil + } + + reader := bytes.NewReader(g.cpuProfile) + if err := g.addFileToZip(reader, "cpu.prof"); err != nil { + return fmt.Errorf("add CPU profile to zip: %w", err) + } + + return nil +} + func (g *BundleGenerator) addStackTrace() error { buf := make([]byte, 5242880) // 5 MB buffer n := runtime.Stack(buf, true) @@ -710,14 +745,14 @@ func (g *BundleGenerator) addCorruptedStateFiles() error { } func (g *BundleGenerator) addLogfile() error { - if g.logFile == "" { + if g.logPath == "" { log.Debugf("skipping empty log file in debug bundle") return nil } - logDir := filepath.Dir(g.logFile) + logDir := filepath.Dir(g.logPath) - if err := g.addSingleLogfile(g.logFile, clientLogFile); err != nil { + if err := g.addSingleLogfile(g.logPath, clientLogFile); err != nil { return fmt.Errorf("add client log file to zip: %w", err) } diff --git a/client/internal/debug/upload.go b/client/internal/debug/upload.go new file mode 100644 index 000000000..cdf52409d --- /dev/null +++ b/client/internal/debug/upload.go @@ -0,0 +1,101 @@ +package debug + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + + "github.com/netbirdio/netbird/upload-server/types" +) + +const maxBundleUploadSize = 50 * 1024 * 1024 + +func UploadDebugBundle(ctx context.Context, url, managementURL, filePath string) (key string, err error) { + response, err := getUploadURL(ctx, url, managementURL) + if err != nil { + return "", err + } + + err = upload(ctx, filePath, response) + if err != nil { + return "", err + } + return response.Key, nil +} + +func upload(ctx context.Context, filePath string, response *types.GetURLResponse) error { + fileData, err := os.Open(filePath) + if err != nil { + return fmt.Errorf("open file: %w", err) + } + + defer fileData.Close() + + stat, err := fileData.Stat() + if err != nil { + return fmt.Errorf("stat file: %w", err) + } + + if stat.Size() > maxBundleUploadSize { + return fmt.Errorf("file size exceeds maximum limit of %d bytes", maxBundleUploadSize) + } + + req, err := http.NewRequestWithContext(ctx, "PUT", response.URL, fileData) + if err != nil { + return fmt.Errorf("create PUT request: %w", err) + } + + req.ContentLength = stat.Size() + req.Header.Set("Content-Type", "application/octet-stream") + + putResp, err := http.DefaultClient.Do(req) + if err != nil { + return fmt.Errorf("upload failed: %v", err) + } + defer putResp.Body.Close() + + if putResp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(putResp.Body) + return fmt.Errorf("upload status %d: %s", putResp.StatusCode, string(body)) + } + return nil +} + +func getUploadURL(ctx context.Context, url string, managementURL string) (*types.GetURLResponse, error) { + id := getURLHash(managementURL) + getReq, err := http.NewRequestWithContext(ctx, "GET", url+"?id="+id, nil) + if err != nil { + return nil, fmt.Errorf("create GET request: %w", err) + } + + getReq.Header.Set(types.ClientHeader, types.ClientHeaderValue) + + resp, err := http.DefaultClient.Do(getReq) + if err != nil { + return nil, fmt.Errorf("get presigned URL: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + return nil, fmt.Errorf("get presigned URL status %d: %s", resp.StatusCode, string(body)) + } + + urlBytes, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("read response body: %w", err) + } + var response types.GetURLResponse + if err := json.Unmarshal(urlBytes, &response); err != nil { + return nil, fmt.Errorf("unmarshal response: %w", err) + } + return &response, nil +} + +func getURLHash(url string) string { + return fmt.Sprintf("%x", sha256.Sum256([]byte(url))) +} diff --git a/client/server/debug_test.go b/client/internal/debug/upload_test.go similarity index 93% rename from client/server/debug_test.go rename to client/internal/debug/upload_test.go index 53d9ac8ed..e833c196d 100644 --- a/client/server/debug_test.go +++ b/client/internal/debug/upload_test.go @@ -1,4 +1,4 @@ -package server +package debug import ( "context" @@ -38,7 +38,7 @@ func TestUpload(t *testing.T) { fileContent := []byte("test file content") err := os.WriteFile(file, fileContent, 0640) require.NoError(t, err) - key, err := uploadDebugBundle(context.Background(), testURL+types.GetURLPath, testURL, file) + key, err := UploadDebugBundle(context.Background(), testURL+types.GetURLPath, testURL, file) require.NoError(t, err) id := getURLHash(testURL) require.Contains(t, key, id+"/") diff --git a/client/internal/debug/wgshow.go b/client/internal/debug/wgshow.go index 8233ca510..1e8a8a6cc 100644 --- a/client/internal/debug/wgshow.go +++ b/client/internal/debug/wgshow.go @@ -60,7 +60,7 @@ func (g *BundleGenerator) toWGShowFormat(s *configurer.Stats) string { } sb.WriteString(fmt.Sprintf(" latest handshake: %s\n", peer.LastHandshake.Format(time.RFC1123))) sb.WriteString(fmt.Sprintf(" transfer: %d B received, %d B sent\n", peer.RxBytes, peer.TxBytes)) - if peer.PresharedKey { + if peer.PresharedKey != [32]byte{} { sb.WriteString(" preshared key: (hidden)\n") } } diff --git a/client/internal/device_auth.go b/client/internal/device_auth.go deleted file mode 100644 index 7f7d06130..000000000 --- a/client/internal/device_auth.go +++ /dev/null @@ -1,136 +0,0 @@ -package internal - -import ( - "context" - "fmt" - "net/url" - - log "github.com/sirupsen/logrus" - "golang.zx2c4.com/wireguard/wgctrl/wgtypes" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - mgm "github.com/netbirdio/netbird/shared/management/client" -) - -// DeviceAuthorizationFlow represents Device Authorization Flow information -type DeviceAuthorizationFlow struct { - Provider string - ProviderConfig DeviceAuthProviderConfig -} - -// DeviceAuthProviderConfig has all attributes needed to initiate a device authorization flow -type DeviceAuthProviderConfig struct { - // ClientID An IDP application client id - ClientID string - // ClientSecret An IDP application client secret - ClientSecret string - // Domain An IDP API domain - // Deprecated. Use OIDCConfigEndpoint instead - Domain string - // Audience An Audience for to authorization validation - Audience string - // TokenEndpoint is the endpoint of an IDP manager where clients can obtain access token - TokenEndpoint string - // DeviceAuthEndpoint is the endpoint of an IDP manager where clients can obtain device authorization code - DeviceAuthEndpoint string - // Scopes provides the scopes to be included in the token request - Scope string - // UseIDToken indicates if the id token should be used for authentication - UseIDToken bool - // LoginHint is used to pre-fill the email/username field during authentication - LoginHint string -} - -// GetDeviceAuthorizationFlowInfo initialize a DeviceAuthorizationFlow instance and return with it -func GetDeviceAuthorizationFlowInfo(ctx context.Context, privateKey string, mgmURL *url.URL) (DeviceAuthorizationFlow, error) { - // validate our peer's Wireguard PRIVATE key - myPrivateKey, err := wgtypes.ParseKey(privateKey) - if err != nil { - log.Errorf("failed parsing Wireguard key %s: [%s]", privateKey, err.Error()) - return DeviceAuthorizationFlow{}, err - } - - var mgmTLSEnabled bool - if mgmURL.Scheme == "https" { - mgmTLSEnabled = true - } - - log.Debugf("connecting to Management Service %s", mgmURL.String()) - mgmClient, err := mgm.NewClient(ctx, mgmURL.Host, myPrivateKey, mgmTLSEnabled) - if err != nil { - log.Errorf("failed connecting to Management Service %s %v", mgmURL.String(), err) - return DeviceAuthorizationFlow{}, err - } - log.Debugf("connected to the Management service %s", mgmURL.String()) - - defer func() { - err = mgmClient.Close() - if err != nil { - log.Warnf("failed to close the Management service client %v", err) - } - }() - - serverKey, err := mgmClient.GetServerPublicKey() - if err != nil { - log.Errorf("failed while getting Management Service public key: %v", err) - return DeviceAuthorizationFlow{}, err - } - - protoDeviceAuthorizationFlow, err := mgmClient.GetDeviceAuthorizationFlow(*serverKey) - if err != nil { - if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { - log.Warnf("server couldn't find device flow, contact admin: %v", err) - return DeviceAuthorizationFlow{}, err - } - log.Errorf("failed to retrieve device flow: %v", err) - return DeviceAuthorizationFlow{}, err - } - - deviceAuthorizationFlow := DeviceAuthorizationFlow{ - Provider: protoDeviceAuthorizationFlow.Provider.String(), - - ProviderConfig: DeviceAuthProviderConfig{ - Audience: protoDeviceAuthorizationFlow.GetProviderConfig().GetAudience(), - ClientID: protoDeviceAuthorizationFlow.GetProviderConfig().GetClientID(), - ClientSecret: protoDeviceAuthorizationFlow.GetProviderConfig().GetClientSecret(), - Domain: protoDeviceAuthorizationFlow.GetProviderConfig().Domain, - TokenEndpoint: protoDeviceAuthorizationFlow.GetProviderConfig().GetTokenEndpoint(), - DeviceAuthEndpoint: protoDeviceAuthorizationFlow.GetProviderConfig().GetDeviceAuthEndpoint(), - Scope: protoDeviceAuthorizationFlow.GetProviderConfig().GetScope(), - UseIDToken: protoDeviceAuthorizationFlow.GetProviderConfig().GetUseIDToken(), - }, - } - - // keep compatibility with older management versions - if deviceAuthorizationFlow.ProviderConfig.Scope == "" { - deviceAuthorizationFlow.ProviderConfig.Scope = "openid" - } - - err = isDeviceAuthProviderConfigValid(deviceAuthorizationFlow.ProviderConfig) - if err != nil { - return DeviceAuthorizationFlow{}, err - } - - return deviceAuthorizationFlow, nil -} - -func isDeviceAuthProviderConfigValid(config DeviceAuthProviderConfig) error { - errorMSGFormat := "invalid provider configuration received from management: %s value is empty. Contact your NetBird administrator" - if config.Audience == "" { - return fmt.Errorf(errorMSGFormat, "Audience") - } - if config.ClientID == "" { - return fmt.Errorf(errorMSGFormat, "Client ID") - } - if config.TokenEndpoint == "" { - return fmt.Errorf(errorMSGFormat, "Token Endpoint") - } - if config.DeviceAuthEndpoint == "" { - return fmt.Errorf(errorMSGFormat, "Device Auth Endpoint") - } - if config.Scope == "" { - return fmt.Errorf(errorMSGFormat, "Device Auth Scopes") - } - return nil -} diff --git a/client/internal/dns/local/local.go b/client/internal/dns/local/local.go index 63c2428ce..b374bcc6a 100644 --- a/client/internal/dns/local/local.go +++ b/client/internal/dns/local/local.go @@ -81,7 +81,10 @@ func (d *Resolver) ProbeAvailability() {} // ServeDNS handles a DNS request func (d *Resolver) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { - logger := log.WithField("request_id", resutil.GetRequestID(w)) + logger := log.WithFields(log.Fields{ + "request_id": resutil.GetRequestID(w), + "dns_id": fmt.Sprintf("%04x", r.Id), + }) if len(r.Question) == 0 { logger.Debug("received local resolver request with no question") @@ -120,7 +123,7 @@ func (d *Resolver) determineRcode(question dns.Question, result lookupResult) in } // No records found, but domain exists with different record types (NODATA) - if d.hasRecordsForDomain(domain.Domain(question.Name)) { + if d.hasRecordsForDomain(domain.Domain(question.Name), question.Qtype) { return dns.RcodeSuccess } @@ -164,11 +167,15 @@ func (d *Resolver) continueToNext(logger *log.Entry, w dns.ResponseWriter, r *dn } // hasRecordsForDomain checks if any records exist for the given domain name regardless of type -func (d *Resolver) hasRecordsForDomain(domainName domain.Domain) bool { +func (d *Resolver) hasRecordsForDomain(domainName domain.Domain, qType uint16) bool { d.mu.RLock() defer d.mu.RUnlock() _, exists := d.domains[domainName] + if !exists && supportsWildcard(qType) { + testWild := transformDomainToWildcard(string(domainName)) + _, exists = d.domains[domain.Domain(testWild)] + } return exists } @@ -195,6 +202,16 @@ type lookupResult struct { func (d *Resolver) lookupRecords(logger *log.Entry, question dns.Question) lookupResult { d.mu.RLock() records, found := d.records[question] + usingWildcard := false + wildQuestion := transformToWildcard(question) + // RFC 4592 section 2.2.1: wildcard only matches if the name does NOT exist in the zone. + // If the domain exists with any record type, return NODATA instead of wildcard match. + if !found && supportsWildcard(question.Qtype) { + if _, domainExists := d.domains[domain.Domain(question.Name)]; !domainExists { + records, found = d.records[wildQuestion] + usingWildcard = found + } + } if !found { d.mu.RUnlock() @@ -216,18 +233,53 @@ func (d *Resolver) lookupRecords(logger *log.Entry, question dns.Question) looku // if there's more than one record, rotate them (round-robin) if len(recordsCopy) > 1 { d.mu.Lock() - records = d.records[question] + q := question + if usingWildcard { + q = wildQuestion + } + records = d.records[q] if len(records) > 1 { first := records[0] records = append(records[1:], first) - d.records[question] = records + d.records[q] = records } d.mu.Unlock() } + if usingWildcard { + return responseFromWildRecords(question.Name, wildQuestion.Name, recordsCopy) + } + return lookupResult{records: recordsCopy, rcode: dns.RcodeSuccess} } +func transformToWildcard(question dns.Question) dns.Question { + wildQuestion := question + wildQuestion.Name = transformDomainToWildcard(wildQuestion.Name) + return wildQuestion +} + +func transformDomainToWildcard(domain string) string { + s := strings.Split(domain, ".") + s[0] = "*" + return strings.Join(s, ".") +} + +func supportsWildcard(queryType uint16) bool { + return queryType != dns.TypeNS && queryType != dns.TypeSOA +} + +func responseFromWildRecords(originalName, wildName string, wildRecords []dns.RR) lookupResult { + records := make([]dns.RR, len(wildRecords)) + for i, record := range wildRecords { + copiedRecord := dns.Copy(record) + copiedRecord.Header().Name = originalName + records[i] = copiedRecord + } + + return lookupResult{records: records, rcode: dns.RcodeSuccess} +} + // lookupCNAMEChain follows a CNAME chain and returns the CNAME records along with // the final resolved record of the requested type. This is required for musl libc // compatibility, which expects the full answer chain rather than just the CNAME. @@ -237,6 +289,13 @@ func (d *Resolver) lookupCNAMEChain(logger *log.Entry, cnameQuestion dns.Questio for range maxDepth { cnameRecords := d.getRecords(cnameQuestion) + if len(cnameRecords) == 0 && supportsWildcard(targetType) { + wildQuestion := transformToWildcard(cnameQuestion) + if wildRecords := d.getRecords(wildQuestion); len(wildRecords) > 0 { + cnameRecords = responseFromWildRecords(cnameQuestion.Name, wildQuestion.Name, wildRecords).records + } + } + if len(cnameRecords) == 0 { break } @@ -303,7 +362,7 @@ func (d *Resolver) resolveCNAMETarget(logger *log.Entry, targetName string, targ } // domain exists locally but not this record type (NODATA) - if d.hasRecordsForDomain(domain.Domain(targetName)) { + if d.hasRecordsForDomain(domain.Domain(targetName), targetType) { return lookupResult{rcode: dns.RcodeSuccess} } diff --git a/client/internal/dns/local/local_test.go b/client/internal/dns/local/local_test.go index 1c7cad5d1..73f70035f 100644 --- a/client/internal/dns/local/local_test.go +++ b/client/internal/dns/local/local_test.go @@ -47,6 +47,24 @@ func TestLocalResolver_ServeDNS(t *testing.T) { RData: "www.netbird.io", } + wild := "wild.netbird.cloud." + + recordWild := nbdns.SimpleRecord{ + Name: "*." + wild, + Type: 1, + Class: nbdns.DefaultClass, + TTL: 300, + RData: "1.2.3.4", + } + + specificRecord := nbdns.SimpleRecord{ + Name: "existing." + wild, + Type: 1, + Class: nbdns.DefaultClass, + TTL: 300, + RData: "5.6.7.8", + } + testCases := []struct { name string inputRecord nbdns.SimpleRecord @@ -69,12 +87,23 @@ func TestLocalResolver_ServeDNS(t *testing.T) { inputMSG: new(dns.Msg).SetQuestion("not.found.com", dns.TypeA), responseShouldBeNil: true, }, + { + name: "Should Resolve A Wild Record", + inputRecord: recordWild, + inputMSG: new(dns.Msg).SetQuestion("test."+wild, dns.TypeA), + }, + { + name: "Should Resolve A more specific Record", + inputRecord: specificRecord, + inputMSG: new(dns.Msg).SetQuestion(specificRecord.Name, dns.TypeA), + }, } for _, testCase := range testCases { t.Run(testCase.name, func(t *testing.T) { resolver := NewResolver() _ = resolver.RegisterRecord(testCase.inputRecord) + _ = resolver.RegisterRecord(recordWild) var responseMSG *dns.Msg responseWriter := &test.MockResponseWriter{ WriteMsgFunc: func(m *dns.Msg) error { @@ -93,7 +122,7 @@ func TestLocalResolver_ServeDNS(t *testing.T) { } answerString := responseMSG.Answer[0].String() - if !strings.Contains(answerString, testCase.inputRecord.Name) { + if !strings.Contains(answerString, testCase.inputMSG.Question[0].Name) { t.Fatalf("answer doesn't contain the same domain name: \nWant: %s\nGot:%s", testCase.name, answerString) } if !strings.Contains(answerString, dns.Type(testCase.inputRecord.Type).String()) { @@ -1341,6 +1370,1210 @@ func TestLocalResolver_FallthroughCaseInsensitive(t *testing.T) { assert.True(t, responseMSG.MsgHdr.Zero, "Should fallthrough for non-authoritative zone with case-insensitive match") } +// TestLocalResolver_WildcardCNAME tests wildcard CNAME record handling for non-CNAME queries +func TestLocalResolver_WildcardCNAME(t *testing.T) { + t.Run("wildcard CNAME resolves A query with internal target", func(t *testing.T) { + resolver := NewResolver() + + // Configure wildcard CNAME pointing to internal A record + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + {Name: "target.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("foo.example.com.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode, "Should resolve via wildcard CNAME") + require.Len(t, resp.Answer, 2, "Should have CNAME + A record") + + // Verify CNAME has the original query name, not the wildcard + cname, ok := resp.Answer[0].(*dns.CNAME) + require.True(t, ok, "First answer should be CNAME") + assert.Equal(t, "foo.example.com.", cname.Hdr.Name, "CNAME owner should be rewritten to query name") + assert.Equal(t, "target.example.com.", cname.Target) + + // Verify A record + a, ok := resp.Answer[1].(*dns.A) + require.True(t, ok, "Second answer should be A record") + assert.Equal(t, "10.0.0.1", a.A.String()) + }) + + t.Run("wildcard CNAME resolves AAAA query with internal target", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + {Name: "target.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("bar.example.com.", dns.TypeAAAA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode, "Should resolve via wildcard CNAME") + require.Len(t, resp.Answer, 2, "Should have CNAME + AAAA record") + + cname, ok := resp.Answer[0].(*dns.CNAME) + require.True(t, ok) + assert.Equal(t, "bar.example.com.", cname.Hdr.Name, "CNAME owner should be rewritten") + + aaaa, ok := resp.Answer[1].(*dns.AAAA) + require.True(t, ok) + assert.Equal(t, "2001:db8::1", aaaa.AAAA.String()) + }) + + t.Run("specific record takes precedence over wildcard CNAME", func(t *testing.T) { + resolver := NewResolver() + + // Both wildcard CNAME and specific A record exist + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + {Name: "specific.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "192.168.1.1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("specific.example.com.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.Len(t, resp.Answer, 1, "Should return specific A record only") + + a, ok := resp.Answer[0].(*dns.A) + require.True(t, ok) + assert.Equal(t, "192.168.1.1", a.A.String()) + }) + + t.Run("specific CNAME takes precedence over wildcard CNAME", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "wildcard-target.example.com."}, + {Name: "specific.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "specific-target.example.com."}, + {Name: "specific-target.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.1.1.1"}, + {Name: "wildcard-target.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.2.2.2"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("specific.example.com.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.GreaterOrEqual(t, len(resp.Answer), 1) + + cname, ok := resp.Answer[0].(*dns.CNAME) + require.True(t, ok) + assert.Equal(t, "specific-target.example.com.", cname.Target, "Should use specific CNAME, not wildcard") + }) + + t.Run("wildcard CNAME to non-existent internal target returns NXDOMAIN with CNAME", func(t *testing.T) { + resolver := NewResolver() + + // Wildcard CNAME pointing to non-existent internal target + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "nonexistent.example.com."}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("foo.example.com.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + // Per RFC 6604, CNAME chains should return the rcode of the final target. + // When the wildcard CNAME target doesn't exist in the managed zone, this + // returns NXDOMAIN with the CNAME record included. + // Note: Current implementation returns NODATA (success) because the wildcard + // domain exists. This test documents the actual behavior. + if resp.Rcode == dns.RcodeNameError { + // RFC-compliant behavior: NXDOMAIN with CNAME + require.Len(t, resp.Answer, 1, "Should include the CNAME pointing to non-existent target") + cname, ok := resp.Answer[0].(*dns.CNAME) + require.True(t, ok) + assert.Equal(t, "foo.example.com.", cname.Hdr.Name, "CNAME owner should be rewritten") + assert.Equal(t, "nonexistent.example.com.", cname.Target) + } else { + // Current behavior: NODATA (success with CNAME but target not found) + assert.Equal(t, dns.RcodeSuccess, resp.Rcode, "Returns NODATA when wildcard exists but target doesn't") + } + }) + + t.Run("wildcard CNAME with multi-level subdomain", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + {Name: "target.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, + }}) + + // Query with multi-level subdomain - wildcard should only match first label + // Standard DNS wildcards only match a single label, so sub.domain.example.com + // should NOT match *.example.com - this tests current implementation behavior + msg := new(dns.Msg).SetQuestion("sub.domain.example.com.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + }) + + t.Run("wildcard CNAME NODATA when target has no matching type", func(t *testing.T) { + resolver := NewResolver() + + // Wildcard CNAME to target that only has A record, query for AAAA + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + {Name: "target.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("foo.example.com.", dns.TypeAAAA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + assert.Equal(t, dns.RcodeSuccess, resp.Rcode, "Should return NODATA (success with no answer for AAAA)") + require.Len(t, resp.Answer, 1, "Should have only CNAME") + + cname, ok := resp.Answer[0].(*dns.CNAME) + require.True(t, ok) + assert.Equal(t, "foo.example.com.", cname.Hdr.Name) + }) + + t.Run("direct CNAME query for wildcard record", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + }, + }}) + + // Direct CNAME query should also work via wildcard + msg := new(dns.Msg).SetQuestion("foo.example.com.", dns.TypeCNAME) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.Len(t, resp.Answer, 1) + + cname, ok := resp.Answer[0].(*dns.CNAME) + require.True(t, ok) + assert.Equal(t, "foo.example.com.", cname.Hdr.Name, "CNAME owner should be rewritten") + assert.Equal(t, "target.example.com.", cname.Target) + }) + + t.Run("wildcard CNAME case insensitive query", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + {Name: "target.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("FOO.EXAMPLE.COM.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode, "Wildcard CNAME should match case-insensitively") + require.Len(t, resp.Answer, 2) + }) + + t.Run("wildcard A and wildcard CNAME coexist - A takes precedence", func(t *testing.T) { + resolver := NewResolver() + + // Both wildcard A and wildcard CNAME exist + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + {Name: "*.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("foo.example.com.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.Len(t, resp.Answer, 1) + + // A record should be returned, not CNAME + a, ok := resp.Answer[0].(*dns.A) + require.True(t, ok, "Wildcard A should take precedence over wildcard CNAME for A query") + assert.Equal(t, "10.0.0.1", a.A.String()) + }) + + t.Run("wildcard CNAME with chained CNAMEs", func(t *testing.T) { + resolver := NewResolver() + + // Wildcard CNAME -> another CNAME -> A record + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "hop1.example.com."}, + {Name: "hop1.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "final.example.com."}, + {Name: "final.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("anyhost.example.com.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.Len(t, resp.Answer, 3, "Should have wildcard CNAME + hop1 CNAME + A record") + + // First should be the wildcard CNAME with rewritten name + cname1, ok := resp.Answer[0].(*dns.CNAME) + require.True(t, ok) + assert.Equal(t, "anyhost.example.com.", cname1.Hdr.Name) + assert.Equal(t, "hop1.example.com.", cname1.Target) + }) +} + +// TestLocalResolver_WildcardAandAAAA tests wildcard A and AAAA record handling +func TestLocalResolver_WildcardAandAAAA(t *testing.T) { + t.Run("wildcard A record resolves with owner name rewriting", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("anyhost.example.com.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.Len(t, resp.Answer, 1) + + a, ok := resp.Answer[0].(*dns.A) + require.True(t, ok) + assert.Equal(t, "anyhost.example.com.", a.Hdr.Name, "Owner name should be rewritten to query name") + assert.Equal(t, "10.0.0.1", a.A.String()) + }) + + t.Run("wildcard AAAA record resolves with owner name rewriting", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("anyhost.example.com.", dns.TypeAAAA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.Len(t, resp.Answer, 1) + + aaaa, ok := resp.Answer[0].(*dns.AAAA) + require.True(t, ok) + assert.Equal(t, "anyhost.example.com.", aaaa.Hdr.Name, "Owner name should be rewritten to query name") + assert.Equal(t, "2001:db8::1", aaaa.AAAA.String()) + }) + + t.Run("NODATA when querying AAAA but only wildcard A exists", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("anyhost.example.com.", dns.TypeAAAA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + assert.Equal(t, dns.RcodeSuccess, resp.Rcode, "Should return NODATA (success with no answer)") + assert.Len(t, resp.Answer, 0, "Should have no AAAA answer") + }) + + t.Run("NODATA when querying A but only wildcard AAAA exists", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("anyhost.example.com.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + assert.Equal(t, dns.RcodeSuccess, resp.Rcode, "Should return NODATA (success with no answer)") + assert.Len(t, resp.Answer, 0, "Should have no A answer") + }) + + t.Run("dual-stack wildcard returns both A and AAAA separately", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + {Name: "*.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::1"}, + }, + }}) + + // Query A + msgA := new(dns.Msg).SetQuestion("anyhost.example.com.", dns.TypeA) + var respA *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { respA = m; return nil }}, msgA) + + require.NotNil(t, respA) + require.Equal(t, dns.RcodeSuccess, respA.Rcode) + require.Len(t, respA.Answer, 1) + a, ok := respA.Answer[0].(*dns.A) + require.True(t, ok) + assert.Equal(t, "10.0.0.1", a.A.String()) + + // Query AAAA + msgAAAA := new(dns.Msg).SetQuestion("anyhost.example.com.", dns.TypeAAAA) + var respAAAA *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { respAAAA = m; return nil }}, msgAAAA) + + require.NotNil(t, respAAAA) + require.Equal(t, dns.RcodeSuccess, respAAAA.Rcode) + require.Len(t, respAAAA.Answer, 1) + aaaa, ok := respAAAA.Answer[0].(*dns.AAAA) + require.True(t, ok) + assert.Equal(t, "2001:db8::1", aaaa.AAAA.String()) + }) + + t.Run("specific A takes precedence over wildcard A", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + {Name: "specific.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "192.168.1.1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("specific.example.com.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.Len(t, resp.Answer, 1) + + a, ok := resp.Answer[0].(*dns.A) + require.True(t, ok) + assert.Equal(t, "192.168.1.1", a.A.String(), "Specific record should take precedence") + }) + + t.Run("specific AAAA takes precedence over wildcard AAAA", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::1"}, + {Name: "specific.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::2"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("specific.example.com.", dns.TypeAAAA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.Len(t, resp.Answer, 1) + + aaaa, ok := resp.Answer[0].(*dns.AAAA) + require.True(t, ok) + assert.Equal(t, "2001:db8::2", aaaa.AAAA.String(), "Specific record should take precedence") + }) + + t.Run("multiple wildcard A records round-robin", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + {Name: "*.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.2"}, + {Name: "*.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.3"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("anyhost.example.com.", dns.TypeA) + + var firstIPs []string + for i := 0; i < 3; i++ { + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Len(t, resp.Answer, 3, "Should return all 3 A records") + + a, ok := resp.Answer[0].(*dns.A) + require.True(t, ok) + firstIPs = append(firstIPs, a.A.String()) + + // Verify owner name is rewritten for all records + for _, ans := range resp.Answer { + assert.Equal(t, "anyhost.example.com.", ans.Header().Name) + } + } + + // Verify rotation happened + assert.NotEqual(t, firstIPs[0], firstIPs[1], "First record should rotate") + assert.NotEqual(t, firstIPs[1], firstIPs[2], "Second rotation should differ") + }) + + t.Run("wildcard A case insensitive", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("ANYHOST.EXAMPLE.COM.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.Len(t, resp.Answer, 1) + }) + + t.Run("wildcard does not match multi-level subdomain", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, + }}) + + // *.example.com should NOT match sub.domain.example.com (standard DNS behavior) + msg := new(dns.Msg).SetQuestion("sub.domain.example.com.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + // This depends on implementation - standard DNS wildcards only match single label + // Current implementation replaces first label with *, so it WOULD match + // This test documents the current behavior + }) + + t.Run("wildcard with existing domain but different type returns NODATA", func(t *testing.T) { + resolver := NewResolver() + + // Specific A record exists, but query for TXT on wildcard domain + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("test.example.com.", dns.TypeTXT) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + assert.Equal(t, dns.RcodeSuccess, resp.Rcode, "Should return NODATA for existing wildcard domain with different type") + assert.Len(t, resp.Answer, 0) + }) + + t.Run("mixed specific and wildcard returns correct records", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + {Name: "specific.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::1"}, + }, + }}) + + // Query A for specific - should use wildcard + msgA := new(dns.Msg).SetQuestion("specific.example.com.", dns.TypeA) + var respA *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { respA = m; return nil }}, msgA) + + require.NotNil(t, respA) + // This could be NODATA since specific.example.com exists but has no A + // or could return wildcard A - depends on implementation + // The current behavior returns NODATA because specific domain exists + + // Query AAAA for specific - should use specific record + msgAAAA := new(dns.Msg).SetQuestion("specific.example.com.", dns.TypeAAAA) + var respAAAA *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { respAAAA = m; return nil }}, msgAAAA) + + require.NotNil(t, respAAAA) + require.Equal(t, dns.RcodeSuccess, respAAAA.Rcode) + require.Len(t, respAAAA.Answer, 1) + aaaa, ok := respAAAA.Answer[0].(*dns.AAAA) + require.True(t, ok) + assert.Equal(t, "2001:db8::1", aaaa.AAAA.String()) + }) +} + +// TestLocalResolver_WildcardEdgeCases tests edge cases for wildcard record handling +func TestLocalResolver_WildcardEdgeCases(t *testing.T) { + t.Run("wildcard does not match NS queries", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("foo.example.com.", dns.TypeNS) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + assert.Equal(t, dns.RcodeNameError, resp.Rcode, "NS queries should not match wildcards") + assert.Len(t, resp.Answer, 0) + }) + + t.Run("wildcard does not match SOA queries", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("foo.example.com.", dns.TypeSOA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + assert.Equal(t, dns.RcodeNameError, resp.Rcode, "SOA queries should not match wildcards") + assert.Len(t, resp.Answer, 0) + }) + + t.Run("apex wildcard query", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, + }}) + + // Query for *.example.com directly (the wildcard itself) + msg := new(dns.Msg).SetQuestion("*.example.com.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.Len(t, resp.Answer, 1) + + a, ok := resp.Answer[0].(*dns.A) + require.True(t, ok) + assert.Equal(t, "10.0.0.1", a.A.String()) + }) + + t.Run("wildcard TXT record", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeTXT), Class: nbdns.DefaultClass, TTL: 300, RData: "v=spf1 -all"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("mail.example.com.", dns.TypeTXT) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.Len(t, resp.Answer, 1) + + txt, ok := resp.Answer[0].(*dns.TXT) + require.True(t, ok) + assert.Equal(t, "mail.example.com.", txt.Hdr.Name, "TXT owner should be rewritten") + }) + + t.Run("wildcard MX record", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeMX), Class: nbdns.DefaultClass, TTL: 300, RData: "10 mail.example.com."}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("sub.example.com.", dns.TypeMX) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.Len(t, resp.Answer, 1) + + mx, ok := resp.Answer[0].(*dns.MX) + require.True(t, ok) + assert.Equal(t, "sub.example.com.", mx.Hdr.Name, "MX owner should be rewritten") + }) + + t.Run("non-authoritative zone with wildcard CNAME triggers fallthrough for unmatched names", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + NonAuthoritative: true, + Records: []nbdns.SimpleRecord{ + {Name: "*.sub.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + }, + }}) + + // Query for name not matching the wildcard pattern + msg := new(dns.Msg).SetQuestion("other.example.com.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + assert.True(t, resp.MsgHdr.Zero, "Should trigger fallthrough for non-authoritative zone") + }) +} + +// TestLocalResolver_MixedRecordTypes tests scenarios with A, AAAA, and CNAME records combined +func TestLocalResolver_MixedRecordTypes(t *testing.T) { + t.Run("specific A with wildcard CNAME - A query uses specific A", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + {Name: "specific.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + {Name: "target.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.2"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("specific.example.com.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.Len(t, resp.Answer, 1, "Should return only the specific A record") + + a, ok := resp.Answer[0].(*dns.A) + require.True(t, ok) + assert.Equal(t, "10.0.0.1", a.A.String(), "Should use specific A, not follow wildcard CNAME") + }) + + t.Run("specific AAAA with wildcard CNAME - AAAA query uses specific AAAA", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + {Name: "specific.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::1"}, + {Name: "target.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::2"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("specific.example.com.", dns.TypeAAAA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.Len(t, resp.Answer, 1, "Should return only the specific AAAA record") + + aaaa, ok := resp.Answer[0].(*dns.AAAA) + require.True(t, ok) + assert.Equal(t, "2001:db8::1", aaaa.AAAA.String(), "Should use specific AAAA, not follow wildcard CNAME") + }) + + t.Run("specific A only - AAAA query returns NODATA", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "host.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("host.example.com.", dns.TypeAAAA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + assert.Equal(t, dns.RcodeSuccess, resp.Rcode, "Should return NODATA (success with no AAAA)") + assert.Len(t, resp.Answer, 0) + }) + + t.Run("specific AAAA only - A query returns NODATA", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "host.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("host.example.com.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + assert.Equal(t, dns.RcodeSuccess, resp.Rcode, "Should return NODATA (success with no A)") + assert.Len(t, resp.Answer, 0) + }) + + t.Run("CNAME with both A and AAAA target - A query returns CNAME + A", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "alias.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + {Name: "target.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + {Name: "target.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("alias.example.com.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.Len(t, resp.Answer, 2, "Should have CNAME + A") + + cname, ok := resp.Answer[0].(*dns.CNAME) + require.True(t, ok) + assert.Equal(t, "target.example.com.", cname.Target) + + a, ok := resp.Answer[1].(*dns.A) + require.True(t, ok) + assert.Equal(t, "10.0.0.1", a.A.String()) + }) + + t.Run("CNAME with both A and AAAA target - AAAA query returns CNAME + AAAA", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "alias.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + {Name: "target.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + {Name: "target.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("alias.example.com.", dns.TypeAAAA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + require.Equal(t, dns.RcodeSuccess, resp.Rcode) + require.Len(t, resp.Answer, 2, "Should have CNAME + AAAA") + + cname, ok := resp.Answer[0].(*dns.CNAME) + require.True(t, ok) + assert.Equal(t, "target.example.com.", cname.Target) + + aaaa, ok := resp.Answer[1].(*dns.AAAA) + require.True(t, ok) + assert.Equal(t, "2001:db8::1", aaaa.AAAA.String()) + }) + + t.Run("CNAME to target with only A - AAAA query returns CNAME only (NODATA)", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "alias.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + {Name: "target.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("alias.example.com.", dns.TypeAAAA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + assert.Equal(t, dns.RcodeSuccess, resp.Rcode, "Should return NODATA with CNAME") + require.Len(t, resp.Answer, 1, "Should have only CNAME") + + _, ok := resp.Answer[0].(*dns.CNAME) + require.True(t, ok) + }) + + t.Run("CNAME to target with only AAAA - A query returns CNAME only (NODATA)", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "alias.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + {Name: "target.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::1"}, + }, + }}) + + msg := new(dns.Msg).SetQuestion("alias.example.com.", dns.TypeA) + var resp *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp = m; return nil }}, msg) + + require.NotNil(t, resp) + assert.Equal(t, dns.RcodeSuccess, resp.Rcode, "Should return NODATA with CNAME") + require.Len(t, resp.Answer, 1, "Should have only CNAME") + + _, ok := resp.Answer[0].(*dns.CNAME) + require.True(t, ok) + }) + + t.Run("wildcard A + wildcard AAAA + wildcard CNAME - each query type returns correct record", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + {Name: "*.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::1"}, + {Name: "*.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + }, + }}) + + // A query should return wildcard A (not CNAME) + msgA := new(dns.Msg).SetQuestion("any.example.com.", dns.TypeA) + var respA *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { respA = m; return nil }}, msgA) + + require.NotNil(t, respA) + require.Equal(t, dns.RcodeSuccess, respA.Rcode) + require.Len(t, respA.Answer, 1) + a, ok := respA.Answer[0].(*dns.A) + require.True(t, ok, "A query should return A record, not CNAME") + assert.Equal(t, "10.0.0.1", a.A.String()) + + // AAAA query should return wildcard AAAA (not CNAME) + msgAAAA := new(dns.Msg).SetQuestion("any.example.com.", dns.TypeAAAA) + var respAAAA *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { respAAAA = m; return nil }}, msgAAAA) + + require.NotNil(t, respAAAA) + require.Equal(t, dns.RcodeSuccess, respAAAA.Rcode) + require.Len(t, respAAAA.Answer, 1) + aaaa, ok := respAAAA.Answer[0].(*dns.AAAA) + require.True(t, ok, "AAAA query should return AAAA record, not CNAME") + assert.Equal(t, "2001:db8::1", aaaa.AAAA.String()) + + // CNAME query should return wildcard CNAME + msgCNAME := new(dns.Msg).SetQuestion("any.example.com.", dns.TypeCNAME) + var respCNAME *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { respCNAME = m; return nil }}, msgCNAME) + + require.NotNil(t, respCNAME) + require.Equal(t, dns.RcodeSuccess, respCNAME.Rcode) + require.Len(t, respCNAME.Answer, 1) + cname, ok := respCNAME.Answer[0].(*dns.CNAME) + require.True(t, ok, "CNAME query should return CNAME record") + assert.Equal(t, "target.example.com.", cname.Target) + }) + + t.Run("dual-stack host with both A and AAAA", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "host.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + {Name: "host.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.2"}, + {Name: "host.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::1"}, + {Name: "host.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::2"}, + }, + }}) + + // A query + msgA := new(dns.Msg).SetQuestion("host.example.com.", dns.TypeA) + var respA *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { respA = m; return nil }}, msgA) + + require.NotNil(t, respA) + require.Equal(t, dns.RcodeSuccess, respA.Rcode) + require.Len(t, respA.Answer, 2, "Should return both A records") + for _, ans := range respA.Answer { + _, ok := ans.(*dns.A) + require.True(t, ok) + } + + // AAAA query + msgAAAA := new(dns.Msg).SetQuestion("host.example.com.", dns.TypeAAAA) + var respAAAA *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { respAAAA = m; return nil }}, msgAAAA) + + require.NotNil(t, respAAAA) + require.Equal(t, dns.RcodeSuccess, respAAAA.Rcode) + require.Len(t, respAAAA.Answer, 2, "Should return both AAAA records") + for _, ans := range respAAAA.Answer { + _, ok := ans.(*dns.AAAA) + require.True(t, ok) + } + }) + + t.Run("CNAME chain with mixed record types at target", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "alias1.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "alias2.example.com."}, + {Name: "alias2.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + {Name: "target.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + {Name: "target.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::1"}, + }, + }}) + + // A query through chain + msgA := new(dns.Msg).SetQuestion("alias1.example.com.", dns.TypeA) + var respA *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { respA = m; return nil }}, msgA) + + require.NotNil(t, respA) + require.Equal(t, dns.RcodeSuccess, respA.Rcode) + require.Len(t, respA.Answer, 3, "Should have 2 CNAMEs + 1 A") + + // Verify chain order + cname1, ok := respA.Answer[0].(*dns.CNAME) + require.True(t, ok) + assert.Equal(t, "alias2.example.com.", cname1.Target) + + cname2, ok := respA.Answer[1].(*dns.CNAME) + require.True(t, ok) + assert.Equal(t, "target.example.com.", cname2.Target) + + a, ok := respA.Answer[2].(*dns.A) + require.True(t, ok) + assert.Equal(t, "10.0.0.1", a.A.String()) + + // AAAA query through chain + msgAAAA := new(dns.Msg).SetQuestion("alias1.example.com.", dns.TypeAAAA) + var respAAAA *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { respAAAA = m; return nil }}, msgAAAA) + + require.NotNil(t, respAAAA) + require.Equal(t, dns.RcodeSuccess, respAAAA.Rcode) + require.Len(t, respAAAA.Answer, 3, "Should have 2 CNAMEs + 1 AAAA") + + aaaa, ok := respAAAA.Answer[2].(*dns.AAAA) + require.True(t, ok) + assert.Equal(t, "2001:db8::1", aaaa.AAAA.String()) + }) + + t.Run("wildcard CNAME with dual-stack target", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "*.example.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.example.com."}, + {Name: "target.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + {Name: "target.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::1"}, + }, + }}) + + // A query via wildcard CNAME + msgA := new(dns.Msg).SetQuestion("any.example.com.", dns.TypeA) + var respA *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { respA = m; return nil }}, msgA) + + require.NotNil(t, respA) + require.Equal(t, dns.RcodeSuccess, respA.Rcode) + require.Len(t, respA.Answer, 2, "Should have CNAME + A") + + cname, ok := respA.Answer[0].(*dns.CNAME) + require.True(t, ok) + assert.Equal(t, "any.example.com.", cname.Hdr.Name, "CNAME owner should be rewritten") + + a, ok := respA.Answer[1].(*dns.A) + require.True(t, ok) + assert.Equal(t, "10.0.0.1", a.A.String()) + + // AAAA query via wildcard CNAME + msgAAAA := new(dns.Msg).SetQuestion("other.example.com.", dns.TypeAAAA) + var respAAAA *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { respAAAA = m; return nil }}, msgAAAA) + + require.NotNil(t, respAAAA) + require.Equal(t, dns.RcodeSuccess, respAAAA.Rcode) + require.Len(t, respAAAA.Answer, 2, "Should have CNAME + AAAA") + + cname2, ok := respAAAA.Answer[0].(*dns.CNAME) + require.True(t, ok) + assert.Equal(t, "other.example.com.", cname2.Hdr.Name, "CNAME owner should be rewritten") + + aaaa, ok := respAAAA.Answer[1].(*dns.AAAA) + require.True(t, ok) + assert.Equal(t, "2001:db8::1", aaaa.AAAA.String()) + }) + + t.Run("specific A + wildcard AAAA - each query type returns correct record", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{{ + Domain: "example.com.", + Records: []nbdns.SimpleRecord{ + {Name: "host.example.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.0.0.1"}, + {Name: "*.example.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8::1"}, + }, + }}) + + // A query for host should return specific A + msgA := new(dns.Msg).SetQuestion("host.example.com.", dns.TypeA) + var respA *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { respA = m; return nil }}, msgA) + + require.NotNil(t, respA) + require.Equal(t, dns.RcodeSuccess, respA.Rcode) + require.Len(t, respA.Answer, 1) + a, ok := respA.Answer[0].(*dns.A) + require.True(t, ok) + assert.Equal(t, "10.0.0.1", a.A.String()) + + // AAAA query for host should return NODATA (specific A exists, no AAAA for host.example.com) + msgAAAA := new(dns.Msg).SetQuestion("host.example.com.", dns.TypeAAAA) + var respAAAA *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { respAAAA = m; return nil }}, msgAAAA) + + require.NotNil(t, respAAAA) + // RFC 4592 section 2.2.1: wildcard should NOT match when the name EXISTS in zone. + // host.example.com exists (has A record), so AAAA query returns NODATA, not wildcard. + assert.Equal(t, dns.RcodeSuccess, respAAAA.Rcode, "Should return NODATA for existing host without AAAA") + assert.Len(t, respAAAA.Answer, 0, "RFC 4592: wildcard should not match when name exists") + + // AAAA query for other host should return wildcard AAAA + msgAAAAOther := new(dns.Msg).SetQuestion("other.example.com.", dns.TypeAAAA) + var respAAAAOther *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { respAAAAOther = m; return nil }}, msgAAAAOther) + + require.NotNil(t, respAAAAOther) + require.Equal(t, dns.RcodeSuccess, respAAAAOther.Rcode) + require.Len(t, respAAAAOther.Answer, 1) + aaaa, ok := respAAAAOther.Answer[0].(*dns.AAAA) + require.True(t, ok) + assert.Equal(t, "2001:db8::1", aaaa.AAAA.String()) + assert.Equal(t, "other.example.com.", aaaa.Hdr.Name, "Owner should be rewritten") + }) + + t.Run("multiple zones with mixed records", func(t *testing.T) { + resolver := NewResolver() + + resolver.Update([]nbdns.CustomZone{ + { + Domain: "zone1.com.", + Records: []nbdns.SimpleRecord{ + {Name: "host.zone1.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.1.0.1"}, + {Name: "host.zone1.com.", Type: int(dns.TypeAAAA), Class: nbdns.DefaultClass, TTL: 300, RData: "2001:db8:1::1"}, + }, + }, + { + Domain: "zone2.com.", + Records: []nbdns.SimpleRecord{ + {Name: "alias.zone2.com.", Type: int(dns.TypeCNAME), Class: nbdns.DefaultClass, TTL: 300, RData: "target.zone2.com."}, + {Name: "target.zone2.com.", Type: int(dns.TypeA), Class: nbdns.DefaultClass, TTL: 300, RData: "10.2.0.1"}, + }, + }, + }) + + // Query zone1 A + msg1A := new(dns.Msg).SetQuestion("host.zone1.com.", dns.TypeA) + var resp1A *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp1A = m; return nil }}, msg1A) + + require.NotNil(t, resp1A) + require.Equal(t, dns.RcodeSuccess, resp1A.Rcode) + require.Len(t, resp1A.Answer, 1) + + // Query zone1 AAAA + msg1AAAA := new(dns.Msg).SetQuestion("host.zone1.com.", dns.TypeAAAA) + var resp1AAAA *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp1AAAA = m; return nil }}, msg1AAAA) + + require.NotNil(t, resp1AAAA) + require.Equal(t, dns.RcodeSuccess, resp1AAAA.Rcode) + require.Len(t, resp1AAAA.Answer, 1) + + // Query zone2 via CNAME + msg2A := new(dns.Msg).SetQuestion("alias.zone2.com.", dns.TypeA) + var resp2A *dns.Msg + resolver.ServeDNS(&test.MockResponseWriter{WriteMsgFunc: func(m *dns.Msg) error { resp2A = m; return nil }}, msg2A) + + require.NotNil(t, resp2A) + require.Equal(t, dns.RcodeSuccess, resp2A.Rcode) + require.Len(t, resp2A.Answer, 2, "Should have CNAME + A") + }) +} + // BenchmarkFindZone_BestCase benchmarks zone lookup with immediate match (first label) func BenchmarkFindZone_BestCase(b *testing.B) { resolver := NewResolver() diff --git a/client/internal/dns/upstream.go b/client/internal/dns/upstream.go index 654d280ef..0fbd32771 100644 --- a/client/internal/dns/upstream.go +++ b/client/internal/dns/upstream.go @@ -71,6 +71,11 @@ type upstreamResolverBase struct { statusRecorder *peer.Status } +type upstreamFailure struct { + upstream netip.AddrPort + reason string +} + func newUpstreamResolverBase(ctx context.Context, statusRecorder *peer.Status, domain string) *upstreamResolverBase { ctx, cancel := context.WithCancel(ctx) @@ -114,7 +119,10 @@ func (u *upstreamResolverBase) Stop() { // ServeDNS handles a DNS request func (u *upstreamResolverBase) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { - logger := log.WithField("request_id", resutil.GetRequestID(w)) + logger := log.WithFields(log.Fields{ + "request_id": resutil.GetRequestID(w), + "dns_id": fmt.Sprintf("%04x", r.Id), + }) u.prepareRequest(r) @@ -123,11 +131,13 @@ func (u *upstreamResolverBase) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { return } - if u.tryUpstreamServers(w, r, logger) { - return + ok, failures := u.tryUpstreamServers(w, r, logger) + if len(failures) > 0 { + u.logUpstreamFailures(r.Question[0].Name, failures, ok, logger) + } + if !ok { + u.writeErrorResponse(w, r, logger) } - - u.writeErrorResponse(w, r, logger) } func (u *upstreamResolverBase) prepareRequest(r *dns.Msg) { @@ -136,7 +146,7 @@ func (u *upstreamResolverBase) prepareRequest(r *dns.Msg) { } } -func (u *upstreamResolverBase) tryUpstreamServers(w dns.ResponseWriter, r *dns.Msg, logger *log.Entry) bool { +func (u *upstreamResolverBase) tryUpstreamServers(w dns.ResponseWriter, r *dns.Msg, logger *log.Entry) (bool, []upstreamFailure) { timeout := u.upstreamTimeout if len(u.upstreamServers) > 1 { maxTotal := 5 * time.Second @@ -149,15 +159,19 @@ func (u *upstreamResolverBase) tryUpstreamServers(w dns.ResponseWriter, r *dns.M } } + var failures []upstreamFailure for _, upstream := range u.upstreamServers { - if u.queryUpstream(w, r, upstream, timeout, logger) { - return true + if failure := u.queryUpstream(w, r, upstream, timeout, logger); failure != nil { + failures = append(failures, *failure) + } else { + return true, failures } } - return false + return false, failures } -func (u *upstreamResolverBase) queryUpstream(w dns.ResponseWriter, r *dns.Msg, upstream netip.AddrPort, timeout time.Duration, logger *log.Entry) bool { +// queryUpstream queries a single upstream server. Returns nil on success, or failure info to try next upstream. +func (u *upstreamResolverBase) queryUpstream(w dns.ResponseWriter, r *dns.Msg, upstream netip.AddrPort, timeout time.Duration, logger *log.Entry) *upstreamFailure { var rm *dns.Msg var t time.Duration var err error @@ -171,31 +185,32 @@ func (u *upstreamResolverBase) queryUpstream(w dns.ResponseWriter, r *dns.Msg, u }() if err != nil { - u.handleUpstreamError(err, upstream, r.Question[0].Name, startTime, timeout, logger) - return false + return u.handleUpstreamError(err, upstream, startTime) } if rm == nil || !rm.Response { - logger.Warnf("no response from upstream %s for question domain=%s", upstream, r.Question[0].Name) - return false + return &upstreamFailure{upstream: upstream, reason: "no response"} } - return u.writeSuccessResponse(w, rm, upstream, r.Question[0].Name, t, logger) + if rm.Rcode == dns.RcodeServerFailure || rm.Rcode == dns.RcodeRefused { + return &upstreamFailure{upstream: upstream, reason: dns.RcodeToString[rm.Rcode]} + } + + u.writeSuccessResponse(w, rm, upstream, r.Question[0].Name, t, logger) + return nil } -func (u *upstreamResolverBase) handleUpstreamError(err error, upstream netip.AddrPort, domain string, startTime time.Time, timeout time.Duration, logger *log.Entry) { +func (u *upstreamResolverBase) handleUpstreamError(err error, upstream netip.AddrPort, startTime time.Time) *upstreamFailure { if !errors.Is(err, context.DeadlineExceeded) && !isTimeout(err) { - logger.Warnf("failed to query upstream %s for question domain=%s: %s", upstream, domain, err) - return + return &upstreamFailure{upstream: upstream, reason: err.Error()} } elapsed := time.Since(startTime) - timeoutMsg := fmt.Sprintf("upstream %s timed out for question domain=%s after %v (timeout=%v)", upstream, domain, elapsed.Truncate(time.Millisecond), timeout) + reason := fmt.Sprintf("timeout after %v", elapsed.Truncate(time.Millisecond)) if peerInfo := u.debugUpstreamTimeout(upstream); peerInfo != "" { - timeoutMsg += " " + peerInfo + reason += " " + peerInfo } - timeoutMsg += fmt.Sprintf(" - error: %v", err) - logger.Warn(timeoutMsg) + return &upstreamFailure{upstream: upstream, reason: reason} } func (u *upstreamResolverBase) writeSuccessResponse(w dns.ResponseWriter, rm *dns.Msg, upstream netip.AddrPort, domain string, t time.Duration, logger *log.Entry) bool { @@ -215,16 +230,34 @@ func (u *upstreamResolverBase) writeSuccessResponse(w dns.ResponseWriter, rm *dn return true } -func (u *upstreamResolverBase) writeErrorResponse(w dns.ResponseWriter, r *dns.Msg, logger *log.Entry) { - logger.Errorf("all queries to the %s failed for question domain=%s", u, r.Question[0].Name) +func (u *upstreamResolverBase) logUpstreamFailures(domain string, failures []upstreamFailure, succeeded bool, logger *log.Entry) { + totalUpstreams := len(u.upstreamServers) + failedCount := len(failures) + failureSummary := formatFailures(failures) + if succeeded { + logger.Warnf("%d/%d upstreams failed for domain=%s: %s", failedCount, totalUpstreams, domain, failureSummary) + } else { + logger.Errorf("%d/%d upstreams failed for domain=%s: %s", failedCount, totalUpstreams, domain, failureSummary) + } +} + +func (u *upstreamResolverBase) writeErrorResponse(w dns.ResponseWriter, r *dns.Msg, logger *log.Entry) { m := new(dns.Msg) m.SetRcode(r, dns.RcodeServerFailure) if err := w.WriteMsg(m); err != nil { - logger.Errorf("failed to write error response for %s for question domain=%s: %s", u, r.Question[0].Name, err) + logger.Errorf("write error response for domain=%s: %s", r.Question[0].Name, err) } } +func formatFailures(failures []upstreamFailure) string { + parts := make([]string, 0, len(failures)) + for _, f := range failures { + parts = append(parts, fmt.Sprintf("%s=%s", f.upstream, f.reason)) + } + return strings.Join(parts, ", ") +} + // ProbeAvailability tests all upstream servers simultaneously and // disables the resolver if none work func (u *upstreamResolverBase) ProbeAvailability() { @@ -468,7 +501,6 @@ func netstackExchange(ctx context.Context, nsNet *netstack.Net, r *dns.Msg, upst return reply, nil } - // FormatPeerStatus formats peer connection status information for debugging DNS timeouts func FormatPeerStatus(peerState *peer.State) string { isConnected := peerState.ConnStatus == peer.StatusConnected diff --git a/client/internal/dns/upstream_test.go b/client/internal/dns/upstream_test.go index 2852f4775..8b06e4475 100644 --- a/client/internal/dns/upstream_test.go +++ b/client/internal/dns/upstream_test.go @@ -2,6 +2,7 @@ package dns import ( "context" + "fmt" "net" "net/netip" "strings" @@ -9,6 +10,8 @@ import ( "time" "github.com/miekg/dns" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "golang.zx2c4.com/wireguard/tun/netstack" "github.com/netbirdio/netbird/client/iface/device" @@ -140,6 +143,23 @@ func (c mockUpstreamResolver) exchange(_ context.Context, _ string, _ *dns.Msg) return c.r, c.rtt, c.err } +type mockUpstreamResponse struct { + msg *dns.Msg + err error +} + +type mockUpstreamResolverPerServer struct { + responses map[string]mockUpstreamResponse + rtt time.Duration +} + +func (c mockUpstreamResolverPerServer) exchange(_ context.Context, upstream string, _ *dns.Msg) (*dns.Msg, time.Duration, error) { + if r, ok := c.responses[upstream]; ok { + return r.msg, c.rtt, r.err + } + return nil, c.rtt, fmt.Errorf("no mock response for %s", upstream) +} + func TestUpstreamResolver_DeactivationReactivation(t *testing.T) { mockClient := &mockUpstreamResolver{ err: dns.ErrTime, @@ -191,3 +211,267 @@ func TestUpstreamResolver_DeactivationReactivation(t *testing.T) { t.Errorf("should be enabled") } } + +func TestUpstreamResolver_Failover(t *testing.T) { + upstream1 := netip.MustParseAddrPort("192.0.2.1:53") + upstream2 := netip.MustParseAddrPort("192.0.2.2:53") + + successAnswer := "192.0.2.100" + timeoutErr := &net.OpError{Op: "read", Err: fmt.Errorf("i/o timeout")} + + testCases := []struct { + name string + upstream1 mockUpstreamResponse + upstream2 mockUpstreamResponse + expectedRcode int + expectAnswer bool + expectTrySecond bool + }{ + { + name: "success on first upstream", + upstream1: mockUpstreamResponse{msg: buildMockResponse(dns.RcodeSuccess, successAnswer)}, + upstream2: mockUpstreamResponse{msg: buildMockResponse(dns.RcodeSuccess, successAnswer)}, + expectedRcode: dns.RcodeSuccess, + expectAnswer: true, + expectTrySecond: false, + }, + { + name: "SERVFAIL from first should try second", + upstream1: mockUpstreamResponse{msg: buildMockResponse(dns.RcodeServerFailure, "")}, + upstream2: mockUpstreamResponse{msg: buildMockResponse(dns.RcodeSuccess, successAnswer)}, + expectedRcode: dns.RcodeSuccess, + expectAnswer: true, + expectTrySecond: true, + }, + { + name: "REFUSED from first should try second", + upstream1: mockUpstreamResponse{msg: buildMockResponse(dns.RcodeRefused, "")}, + upstream2: mockUpstreamResponse{msg: buildMockResponse(dns.RcodeSuccess, successAnswer)}, + expectedRcode: dns.RcodeSuccess, + expectAnswer: true, + expectTrySecond: true, + }, + { + name: "NXDOMAIN from first should NOT try second", + upstream1: mockUpstreamResponse{msg: buildMockResponse(dns.RcodeNameError, "")}, + upstream2: mockUpstreamResponse{msg: buildMockResponse(dns.RcodeSuccess, successAnswer)}, + expectedRcode: dns.RcodeNameError, + expectAnswer: false, + expectTrySecond: false, + }, + { + name: "timeout from first should try second", + upstream1: mockUpstreamResponse{err: timeoutErr}, + upstream2: mockUpstreamResponse{msg: buildMockResponse(dns.RcodeSuccess, successAnswer)}, + expectedRcode: dns.RcodeSuccess, + expectAnswer: true, + expectTrySecond: true, + }, + { + name: "no response from first should try second", + upstream1: mockUpstreamResponse{msg: nil}, + upstream2: mockUpstreamResponse{msg: buildMockResponse(dns.RcodeSuccess, successAnswer)}, + expectedRcode: dns.RcodeSuccess, + expectAnswer: true, + expectTrySecond: true, + }, + { + name: "both upstreams return SERVFAIL", + upstream1: mockUpstreamResponse{msg: buildMockResponse(dns.RcodeServerFailure, "")}, + upstream2: mockUpstreamResponse{msg: buildMockResponse(dns.RcodeServerFailure, "")}, + expectedRcode: dns.RcodeServerFailure, + expectAnswer: false, + expectTrySecond: true, + }, + { + name: "both upstreams timeout", + upstream1: mockUpstreamResponse{err: timeoutErr}, + upstream2: mockUpstreamResponse{err: timeoutErr}, + expectedRcode: dns.RcodeServerFailure, + expectAnswer: false, + expectTrySecond: true, + }, + { + name: "first SERVFAIL then timeout", + upstream1: mockUpstreamResponse{msg: buildMockResponse(dns.RcodeServerFailure, "")}, + upstream2: mockUpstreamResponse{err: timeoutErr}, + expectedRcode: dns.RcodeServerFailure, + expectAnswer: false, + expectTrySecond: true, + }, + { + name: "first timeout then SERVFAIL", + upstream1: mockUpstreamResponse{err: timeoutErr}, + upstream2: mockUpstreamResponse{msg: buildMockResponse(dns.RcodeServerFailure, "")}, + expectedRcode: dns.RcodeServerFailure, + expectAnswer: false, + expectTrySecond: true, + }, + { + name: "first REFUSED then SERVFAIL", + upstream1: mockUpstreamResponse{msg: buildMockResponse(dns.RcodeRefused, "")}, + upstream2: mockUpstreamResponse{msg: buildMockResponse(dns.RcodeServerFailure, "")}, + expectedRcode: dns.RcodeServerFailure, + expectAnswer: false, + expectTrySecond: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var queriedUpstreams []string + mockClient := &mockUpstreamResolverPerServer{ + responses: map[string]mockUpstreamResponse{ + upstream1.String(): tc.upstream1, + upstream2.String(): tc.upstream2, + }, + rtt: time.Millisecond, + } + + trackingClient := &trackingMockClient{ + inner: mockClient, + queriedUpstreams: &queriedUpstreams, + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + resolver := &upstreamResolverBase{ + ctx: ctx, + upstreamClient: trackingClient, + upstreamServers: []netip.AddrPort{upstream1, upstream2}, + upstreamTimeout: UpstreamTimeout, + } + + var responseMSG *dns.Msg + responseWriter := &test.MockResponseWriter{ + WriteMsgFunc: func(m *dns.Msg) error { + responseMSG = m + return nil + }, + } + + inputMSG := new(dns.Msg).SetQuestion("example.com.", dns.TypeA) + resolver.ServeDNS(responseWriter, inputMSG) + + require.NotNil(t, responseMSG, "should write a response") + assert.Equal(t, tc.expectedRcode, responseMSG.Rcode, "unexpected rcode") + + if tc.expectAnswer { + require.NotEmpty(t, responseMSG.Answer, "expected answer records") + assert.Contains(t, responseMSG.Answer[0].String(), successAnswer) + } + + if tc.expectTrySecond { + assert.Len(t, queriedUpstreams, 2, "should have tried both upstreams") + assert.Equal(t, upstream1.String(), queriedUpstreams[0]) + assert.Equal(t, upstream2.String(), queriedUpstreams[1]) + } else { + assert.Len(t, queriedUpstreams, 1, "should have only tried first upstream") + assert.Equal(t, upstream1.String(), queriedUpstreams[0]) + } + }) + } +} + +type trackingMockClient struct { + inner *mockUpstreamResolverPerServer + queriedUpstreams *[]string +} + +func (t *trackingMockClient) exchange(ctx context.Context, upstream string, r *dns.Msg) (*dns.Msg, time.Duration, error) { + *t.queriedUpstreams = append(*t.queriedUpstreams, upstream) + return t.inner.exchange(ctx, upstream, r) +} + +func buildMockResponse(rcode int, answer string) *dns.Msg { + m := new(dns.Msg) + m.Response = true + m.Rcode = rcode + + if rcode == dns.RcodeSuccess && answer != "" { + m.Answer = []dns.RR{ + &dns.A{ + Hdr: dns.RR_Header{ + Name: "example.com.", + Rrtype: dns.TypeA, + Class: dns.ClassINET, + Ttl: 300, + }, + A: net.ParseIP(answer), + }, + } + } + return m +} + +func TestUpstreamResolver_SingleUpstreamFailure(t *testing.T) { + upstream := netip.MustParseAddrPort("192.0.2.1:53") + + mockClient := &mockUpstreamResolverPerServer{ + responses: map[string]mockUpstreamResponse{ + upstream.String(): {msg: buildMockResponse(dns.RcodeServerFailure, "")}, + }, + rtt: time.Millisecond, + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + resolver := &upstreamResolverBase{ + ctx: ctx, + upstreamClient: mockClient, + upstreamServers: []netip.AddrPort{upstream}, + upstreamTimeout: UpstreamTimeout, + } + + var responseMSG *dns.Msg + responseWriter := &test.MockResponseWriter{ + WriteMsgFunc: func(m *dns.Msg) error { + responseMSG = m + return nil + }, + } + + inputMSG := new(dns.Msg).SetQuestion("example.com.", dns.TypeA) + resolver.ServeDNS(responseWriter, inputMSG) + + require.NotNil(t, responseMSG, "should write a response") + assert.Equal(t, dns.RcodeServerFailure, responseMSG.Rcode, "single upstream SERVFAIL should return SERVFAIL") +} + +func TestFormatFailures(t *testing.T) { + testCases := []struct { + name string + failures []upstreamFailure + expected string + }{ + { + name: "empty slice", + failures: []upstreamFailure{}, + expected: "", + }, + { + name: "single failure", + failures: []upstreamFailure{ + {upstream: netip.MustParseAddrPort("8.8.8.8:53"), reason: "SERVFAIL"}, + }, + expected: "8.8.8.8:53=SERVFAIL", + }, + { + name: "multiple failures", + failures: []upstreamFailure{ + {upstream: netip.MustParseAddrPort("8.8.8.8:53"), reason: "SERVFAIL"}, + {upstream: netip.MustParseAddrPort("8.8.4.4:53"), reason: "timeout after 2s"}, + }, + expected: "8.8.8.8:53=SERVFAIL, 8.8.4.4:53=timeout after 2s", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := formatFailures(tc.failures) + assert.Equal(t, tc.expected, result) + }) + } +} diff --git a/client/internal/engine.go b/client/internal/engine.go index 0182b2530..f0693e82c 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -31,6 +31,7 @@ import ( "github.com/netbirdio/netbird/client/iface/device" "github.com/netbirdio/netbird/client/iface/udpmux" "github.com/netbirdio/netbird/client/internal/acl" + "github.com/netbirdio/netbird/client/internal/debug" "github.com/netbirdio/netbird/client/internal/dns" dnsconfig "github.com/netbirdio/netbird/client/internal/dns/config" "github.com/netbirdio/netbird/client/internal/dnsfwd" @@ -42,12 +43,14 @@ import ( "github.com/netbirdio/netbird/client/internal/peer/guard" icemaker "github.com/netbirdio/netbird/client/internal/peer/ice" "github.com/netbirdio/netbird/client/internal/peerstore" + "github.com/netbirdio/netbird/client/internal/profilemanager" "github.com/netbirdio/netbird/client/internal/relay" "github.com/netbirdio/netbird/client/internal/rosenpass" "github.com/netbirdio/netbird/client/internal/routemanager" "github.com/netbirdio/netbird/client/internal/routemanager/systemops" "github.com/netbirdio/netbird/client/internal/statemanager" "github.com/netbirdio/netbird/client/internal/updatemanager" + "github.com/netbirdio/netbird/client/jobexec" cProto "github.com/netbirdio/netbird/client/proto" "github.com/netbirdio/netbird/shared/management/domain" semaphoregroup "github.com/netbirdio/netbird/util/semaphore-group" @@ -132,6 +135,11 @@ type EngineConfig struct { LazyConnectionEnabled bool MTU uint16 + + // for debug bundle generation + ProfileConfig *profilemanager.Config + + LogPath string } // Engine is a mechanism responsible for reacting on Signal and Management stream events and managing connections to the remote peers. @@ -195,7 +203,8 @@ type Engine struct { stateManager *statemanager.Manager srWatcher *guard.SRWatcher - // Sync response persistence + // Sync response persistence (protected by syncRespMux) + syncRespMux sync.RWMutex persistSyncResponse bool latestSyncResponse *mgmProto.SyncResponse connSemaphore *semaphoregroup.SemaphoreGroup @@ -211,6 +220,9 @@ type Engine struct { shutdownWg sync.WaitGroup probeStunTurn *relay.StunTurnProbe + + jobExecutor *jobexec.Executor + jobExecutorWG sync.WaitGroup } // Peer is an instance of the Connection Peer @@ -224,7 +236,18 @@ type localIpUpdater interface { } // NewEngine creates a new Connection Engine with probes attached -func NewEngine(clientCtx context.Context, clientCancel context.CancelFunc, signalClient signal.Client, mgmClient mgm.Client, relayManager *relayClient.Manager, config *EngineConfig, mobileDep MobileDependency, statusRecorder *peer.Status, checks []*mgmProto.Checks, stateManager *statemanager.Manager) *Engine { +func NewEngine( + clientCtx context.Context, + clientCancel context.CancelFunc, + signalClient signal.Client, + mgmClient mgm.Client, + relayManager *relayClient.Manager, + config *EngineConfig, + mobileDep MobileDependency, + statusRecorder *peer.Status, + checks []*mgmProto.Checks, + stateManager *statemanager.Manager, +) *Engine { engine := &Engine{ clientCtx: clientCtx, clientCancel: clientCancel, @@ -244,6 +267,7 @@ func NewEngine(clientCtx context.Context, clientCancel context.CancelFunc, signa checks: checks, connSemaphore: semaphoregroup.NewSemaphoreGroup(connInitLimit), probeStunTurn: relay.NewStunTurnProbe(relay.DefaultCacheTTL), + jobExecutor: jobexec.NewExecutor(), } log.Infof("I am: %s", config.WgPrivateKey.PublicKey().String()) @@ -312,6 +336,8 @@ func (e *Engine) Stop() error { e.cancel() } + e.jobExecutorWG.Wait() // block until job goroutines finish + e.close() // stop flow manager after wg interface is gone @@ -479,6 +505,15 @@ func (e *Engine) Start(netbirdConfig *mgmProto.NetbirdConfig, mgmtURL *url.URL) return fmt.Errorf("up wg interface: %w", err) } + // Set up notrack rules immediately after proxy is listening to prevent + // conntrack entries from being created before the rules are in place + e.setupWGProxyNoTrack() + + // Set the WireGuard interface for rosenpass after interface is up + if e.rpManager != nil { + e.rpManager.SetInterface(e.wgInterface) + } + // if inbound conns are blocked there is no need to create the ACL manager if e.firewall != nil && !e.config.BlockInbound { e.acl = acl.NewDefaultManager(e.firewall) @@ -500,6 +535,7 @@ func (e *Engine) Start(netbirdConfig *mgmProto.NetbirdConfig, mgmtURL *url.URL) e.receiveSignalEvents() e.receiveManagementEvents() + e.receiveJobEvents() // starting network monitor at the very last to avoid disruptions e.startNetworkMonitor() @@ -585,6 +621,23 @@ func (e *Engine) initFirewall() error { return nil } +// setupWGProxyNoTrack configures connection tracking exclusion for WireGuard proxy traffic. +// This prevents conntrack/MASQUERADE from affecting loopback traffic between WireGuard and the eBPF proxy. +func (e *Engine) setupWGProxyNoTrack() { + if e.firewall == nil { + return + } + + proxyPort := e.wgInterface.GetProxyPort() + if proxyPort == 0 { + return + } + + if err := e.firewall.SetupEBPFProxyNoTrack(proxyPort, uint16(e.config.WgPort)); err != nil { + log.Warnf("failed to setup ebpf proxy notrack: %v", err) + } +} + func (e *Engine) blockLanAccess() { if e.config.BlockInbound { // no need to set up extra deny rules if inbound is already blocked in general @@ -828,9 +881,18 @@ func (e *Engine) handleSync(update *mgmProto.SyncResponse) error { return nil } + // Persist sync response under the dedicated lock (syncRespMux), not under syncMsgMux. + // Read the storage-enabled flag under the syncRespMux too. + e.syncRespMux.RLock() + enabled := e.persistSyncResponse + e.syncRespMux.RUnlock() + // Store sync response if persistence is enabled - if e.persistSyncResponse { + if enabled { + e.syncRespMux.Lock() e.latestSyncResponse = update + e.syncRespMux.Unlock() + log.Debugf("sync response persisted with serial %d", nm.GetSerial()) } @@ -960,6 +1022,80 @@ func (e *Engine) updateConfig(conf *mgmProto.PeerConfig) error { return nil } +func (e *Engine) receiveJobEvents() { + e.jobExecutorWG.Add(1) + go func() { + defer e.jobExecutorWG.Done() + err := e.mgmClient.Job(e.ctx, func(msg *mgmProto.JobRequest) *mgmProto.JobResponse { + resp := mgmProto.JobResponse{ + ID: msg.ID, + Status: mgmProto.JobStatus_failed, + } + switch params := msg.WorkloadParameters.(type) { + case *mgmProto.JobRequest_Bundle: + bundleResult, err := e.handleBundle(params.Bundle) + if err != nil { + log.Errorf("handling bundle: %v", err) + resp.Reason = []byte(err.Error()) + return &resp + } + resp.Status = mgmProto.JobStatus_succeeded + resp.WorkloadResults = bundleResult + return &resp + default: + resp.Reason = []byte(jobexec.ErrJobNotImplemented.Error()) + return &resp + } + }) + if err != nil { + // happens if management is unavailable for a long time. + // We want to cancel the operation of the whole client + _ = CtxGetState(e.ctx).Wrap(ErrResetConnection) + e.clientCancel() + return + } + log.Info("stopped receiving jobs from Management Service") + }() + log.Info("connecting to Management Service jobs stream") +} + +func (e *Engine) handleBundle(params *mgmProto.BundleParameters) (*mgmProto.JobResponse_Bundle, error) { + log.Infof("handle remote debug bundle request: %s", params.String()) + syncResponse, err := e.GetLatestSyncResponse() + if err != nil { + log.Warnf("get latest sync response: %v", err) + } + + bundleDeps := debug.GeneratorDependencies{ + InternalConfig: e.config.ProfileConfig, + StatusRecorder: e.statusRecorder, + SyncResponse: syncResponse, + LogPath: e.config.LogPath, + RefreshStatus: func() { + e.RunHealthProbes(true) + }, + } + + bundleJobParams := debug.BundleConfig{ + Anonymize: params.Anonymize, + IncludeSystemInfo: true, + LogFileCount: uint32(params.LogFileCount), + } + + waitFor := time.Duration(params.BundleForTime) * time.Minute + + uploadKey, err := e.jobExecutor.BundleJob(e.ctx, bundleDeps, bundleJobParams, waitFor, e.config.ProfileConfig.ManagementURL.String()) + if err != nil { + return nil, err + } + + response := &mgmProto.JobResponse_Bundle{ + Bundle: &mgmProto.BundleResult{ + UploadKey: uploadKey, + }, + } + return response, nil +} // receiveManagementEvents connects to the Management Service event stream to receive updates from the management service // E.g. when a new peer has been registered and we are allowed to connect to it. @@ -1405,6 +1541,7 @@ func (e *Engine) createPeerConn(pubKey string, allowedIPs []netip.Prefix, agentV if e.rpManager != nil { peerConn.SetOnConnected(e.rpManager.OnConnected) peerConn.SetOnDisconnected(e.rpManager.OnDisconnected) + peerConn.SetRosenpassInitializedPresharedKeyValidator(e.rpManager.IsPresharedKeyInitialized) } return peerConn, nil @@ -1528,6 +1665,7 @@ func (e *Engine) parseNATExternalIPMappings() []string { func (e *Engine) close() { log.Debugf("removing Netbird interface %s", e.config.WgIfaceName) + if e.wgInterface != nil { if err := e.wgInterface.Close(); err != nil { log.Errorf("failed closing Netbird interface %s %v", e.config.WgIfaceName, err) @@ -1714,7 +1852,7 @@ func (e *Engine) getRosenpassAddr() string { return "" } -// RunHealthProbes executes health checks for Signal, Management, Relay and WireGuard services +// RunHealthProbes executes health checks for Signal, Management, Relay, and WireGuard services // and updates the status recorder with the latest states. func (e *Engine) RunHealthProbes(waitForResult bool) bool { e.syncMsgMux.Lock() @@ -1728,23 +1866,8 @@ func (e *Engine) RunHealthProbes(waitForResult bool) bool { stuns := slices.Clone(e.STUNs) turns := slices.Clone(e.TURNs) - if e.wgInterface != nil { - stats, err := e.wgInterface.GetStats() - if err != nil { - log.Warnf("failed to get wireguard stats: %v", err) - e.syncMsgMux.Unlock() - return false - } - for _, key := range e.peerStore.PeersPubKey() { - // wgStats could be zero value, in which case we just reset the stats - wgStats, ok := stats[key] - if !ok { - continue - } - if err := e.statusRecorder.UpdateWireGuardPeerState(key, wgStats); err != nil { - log.Debugf("failed to update wg stats for peer %s: %s", key, err) - } - } + if err := e.statusRecorder.RefreshWireGuardStats(); err != nil { + log.Debugf("failed to refresh WireGuard stats: %v", err) } e.syncMsgMux.Unlock() @@ -1848,8 +1971,8 @@ func (e *Engine) stopDNSServer() { // SetSyncResponsePersistence enables or disables sync response persistence func (e *Engine) SetSyncResponsePersistence(enabled bool) { - e.syncMsgMux.Lock() - defer e.syncMsgMux.Unlock() + e.syncRespMux.Lock() + defer e.syncRespMux.Unlock() if enabled == e.persistSyncResponse { return @@ -1864,20 +1987,22 @@ func (e *Engine) SetSyncResponsePersistence(enabled bool) { // GetLatestSyncResponse returns the stored sync response if persistence is enabled func (e *Engine) GetLatestSyncResponse() (*mgmProto.SyncResponse, error) { - e.syncMsgMux.Lock() - defer e.syncMsgMux.Unlock() + e.syncRespMux.RLock() + enabled := e.persistSyncResponse + latest := e.latestSyncResponse + e.syncRespMux.RUnlock() - if !e.persistSyncResponse { + if !enabled { return nil, errors.New("sync response persistence is disabled") } - if e.latestSyncResponse == nil { + if latest == nil { //nolint:nilnil return nil, nil } - log.Debugf("Retrieving latest sync response with size %d bytes", proto.Size(e.latestSyncResponse)) - sr, ok := proto.Clone(e.latestSyncResponse).(*mgmProto.SyncResponse) + log.Debugf("Retrieving latest sync response with size %d bytes", proto.Size(latest)) + sr, ok := proto.Clone(latest).(*mgmProto.SyncResponse) if !ok { return nil, fmt.Errorf("failed to clone sync response") } diff --git a/client/internal/engine_test.go b/client/internal/engine_test.go index a15ee0581..012c8ad6e 100644 --- a/client/internal/engine_test.go +++ b/client/internal/engine_test.go @@ -25,6 +25,7 @@ import ( "google.golang.org/grpc/keepalive" "github.com/netbirdio/netbird/client/internal/stdnet" + "github.com/netbirdio/netbird/management/server/job" "github.com/netbirdio/management-integrations/integrations" @@ -106,6 +107,7 @@ type MockWGIface struct { GetStatsFunc func() (map[string]configurer.WGStats, error) GetInterfaceGUIDStringFunc func() (string, error) GetProxyFunc func() wgproxy.Proxy + GetProxyPortFunc func() uint16 GetNetFunc func() *netstack.Net LastActivitiesFunc func() map[string]monotime.Time } @@ -202,6 +204,13 @@ func (m *MockWGIface) GetProxy() wgproxy.Proxy { return m.GetProxyFunc() } +func (m *MockWGIface) GetProxyPort() uint16 { + if m.GetProxyPortFunc != nil { + return m.GetProxyPortFunc() + } + return 0 +} + func (m *MockWGIface) GetNet() *netstack.Net { return m.GetNetFunc() } @@ -213,6 +222,10 @@ func (m *MockWGIface) LastActivities() map[string]monotime.Time { return nil } +func (m *MockWGIface) SetPresharedKey(peerKey string, psk wgtypes.Key, updateOnly bool) error { + return nil +} + func TestMain(m *testing.M) { _ = util.InitLog("debug", util.LogConsole) code := m.Run() @@ -1599,6 +1612,7 @@ func startManagement(t *testing.T, dataDir, testFile string) (*grpc.Server, stri permissionsManager := permissions.NewManager(store) peersManager := peers.NewManager(store, permissionsManager) + jobManager := job.NewJobManager(nil, store, peersManager) ia, _ := integrations.NewIntegratedValidator(context.Background(), peersManager, nil, eventStore) @@ -1622,7 +1636,7 @@ func startManagement(t *testing.T, dataDir, testFile string) (*grpc.Server, stri updateManager := update_channel.NewPeersUpdateManager(metrics) requestBuffer := server.NewAccountRequestBuffer(context.Background(), store) networkMapController := controller.NewController(context.Background(), store, metrics, updateManager, requestBuffer, server.MockIntegratedValidator{}, settingsMockManager, "netbird.selfhosted", port_forwarding.NewControllerMock(), manager.NewEphemeralManager(store, peersManager), config) - accountManager, err := server.BuildManager(context.Background(), config, store, networkMapController, nil, "", eventStore, nil, false, ia, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) + accountManager, err := server.BuildManager(context.Background(), config, store, networkMapController, jobManager, nil, "", eventStore, nil, false, ia, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) if err != nil { return nil, "", err } @@ -1631,7 +1645,7 @@ func startManagement(t *testing.T, dataDir, testFile string) (*grpc.Server, stri if err != nil { return nil, "", err } - mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, secretsManager, nil, nil, &server.MockIntegratedValidator{}, networkMapController, nil) + mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, &server.MockIntegratedValidator{}, networkMapController, nil) if err != nil { return nil, "", err } diff --git a/client/internal/iface_common.go b/client/internal/iface_common.go index 90b06cbd1..39e9bacfa 100644 --- a/client/internal/iface_common.go +++ b/client/internal/iface_common.go @@ -28,6 +28,7 @@ type wgIfaceBase interface { Up() (*udpmux.UniversalUDPMuxDefault, error) UpdateAddr(newAddr string) error GetProxy() wgproxy.Proxy + GetProxyPort() uint16 UpdatePeer(peerKey string, allowedIps []netip.Prefix, keepAlive time.Duration, endpoint *net.UDPAddr, preSharedKey *wgtypes.Key) error RemoveEndpointAddress(key string) error RemovePeer(peerKey string) error @@ -42,4 +43,5 @@ type wgIfaceBase interface { GetNet() *netstack.Net FullStats() (*configurer.Stats, error) LastActivities() map[string]monotime.Time + SetPresharedKey(peerKey string, psk wgtypes.Key, updateOnly bool) error } diff --git a/client/internal/login.go b/client/internal/login.go deleted file mode 100644 index f528783ef..000000000 --- a/client/internal/login.go +++ /dev/null @@ -1,201 +0,0 @@ -package internal - -import ( - "context" - "net/url" - - "github.com/google/uuid" - log "github.com/sirupsen/logrus" - "golang.zx2c4.com/wireguard/wgctrl/wgtypes" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/netbirdio/netbird/client/internal/profilemanager" - "github.com/netbirdio/netbird/client/ssh" - "github.com/netbirdio/netbird/client/system" - mgm "github.com/netbirdio/netbird/shared/management/client" - mgmProto "github.com/netbirdio/netbird/shared/management/proto" -) - -// IsLoginRequired check that the server is support SSO or not -func IsLoginRequired(ctx context.Context, config *profilemanager.Config) (bool, error) { - mgmURL := config.ManagementURL - mgmClient, err := getMgmClient(ctx, config.PrivateKey, mgmURL) - if err != nil { - return false, err - } - defer func() { - err = mgmClient.Close() - if err != nil { - cStatus, ok := status.FromError(err) - if !ok || ok && cStatus.Code() != codes.Canceled { - log.Warnf("failed to close the Management service client, err: %v", err) - } - } - }() - log.Debugf("connected to the Management service %s", mgmURL.String()) - - pubSSHKey, err := ssh.GeneratePublicKey([]byte(config.SSHKey)) - if err != nil { - return false, err - } - - _, _, err = doMgmLogin(ctx, mgmClient, pubSSHKey, config) - if isLoginNeeded(err) { - return true, nil - } - return false, err -} - -// Login or register the client -func Login(ctx context.Context, config *profilemanager.Config, setupKey string, jwtToken string) error { - mgmClient, err := getMgmClient(ctx, config.PrivateKey, config.ManagementURL) - if err != nil { - return err - } - defer func() { - err = mgmClient.Close() - if err != nil { - cStatus, ok := status.FromError(err) - if !ok || ok && cStatus.Code() != codes.Canceled { - log.Warnf("failed to close the Management service client, err: %v", err) - } - } - }() - log.Debugf("connected to the Management service %s", config.ManagementURL.String()) - - pubSSHKey, err := ssh.GeneratePublicKey([]byte(config.SSHKey)) - if err != nil { - return err - } - - serverKey, _, err := doMgmLogin(ctx, mgmClient, pubSSHKey, config) - if serverKey != nil && isRegistrationNeeded(err) { - log.Debugf("peer registration required") - _, err = registerPeer(ctx, *serverKey, mgmClient, setupKey, jwtToken, pubSSHKey, config) - if err != nil { - return err - } - } else if err != nil { - return err - } - - return nil -} - -func getMgmClient(ctx context.Context, privateKey string, mgmURL *url.URL) (*mgm.GrpcClient, error) { - // validate our peer's Wireguard PRIVATE key - myPrivateKey, err := wgtypes.ParseKey(privateKey) - if err != nil { - log.Errorf("failed parsing Wireguard key %s: [%s]", privateKey, err.Error()) - return nil, err - } - - var mgmTlsEnabled bool - if mgmURL.Scheme == "https" { - mgmTlsEnabled = true - } - - log.Debugf("connecting to the Management service %s", mgmURL.String()) - mgmClient, err := mgm.NewClient(ctx, mgmURL.Host, myPrivateKey, mgmTlsEnabled) - if err != nil { - log.Errorf("failed connecting to the Management service %s %v", mgmURL.String(), err) - return nil, err - } - return mgmClient, err -} - -func doMgmLogin(ctx context.Context, mgmClient *mgm.GrpcClient, pubSSHKey []byte, config *profilemanager.Config) (*wgtypes.Key, *mgmProto.LoginResponse, error) { - serverKey, err := mgmClient.GetServerPublicKey() - if err != nil { - log.Errorf("failed while getting Management Service public key: %v", err) - return nil, nil, err - } - - sysInfo := system.GetInfo(ctx) - sysInfo.SetFlags( - config.RosenpassEnabled, - config.RosenpassPermissive, - config.ServerSSHAllowed, - config.DisableClientRoutes, - config.DisableServerRoutes, - config.DisableDNS, - config.DisableFirewall, - config.BlockLANAccess, - config.BlockInbound, - config.LazyConnectionEnabled, - config.EnableSSHRoot, - config.EnableSSHSFTP, - config.EnableSSHLocalPortForwarding, - config.EnableSSHRemotePortForwarding, - config.DisableSSHAuth, - ) - loginResp, err := mgmClient.Login(*serverKey, sysInfo, pubSSHKey, config.DNSLabels) - return serverKey, loginResp, err -} - -// registerPeer checks whether setupKey was provided via cmd line and if not then it prompts user to enter a key. -// Otherwise tries to register with the provided setupKey via command line. -func registerPeer(ctx context.Context, serverPublicKey wgtypes.Key, client *mgm.GrpcClient, setupKey string, jwtToken string, pubSSHKey []byte, config *profilemanager.Config) (*mgmProto.LoginResponse, error) { - validSetupKey, err := uuid.Parse(setupKey) - if err != nil && jwtToken == "" { - return nil, status.Errorf(codes.InvalidArgument, "invalid setup-key or no sso information provided, err: %v", err) - } - - log.Debugf("sending peer registration request to Management Service") - info := system.GetInfo(ctx) - info.SetFlags( - config.RosenpassEnabled, - config.RosenpassPermissive, - config.ServerSSHAllowed, - config.DisableClientRoutes, - config.DisableServerRoutes, - config.DisableDNS, - config.DisableFirewall, - config.BlockLANAccess, - config.BlockInbound, - config.LazyConnectionEnabled, - config.EnableSSHRoot, - config.EnableSSHSFTP, - config.EnableSSHLocalPortForwarding, - config.EnableSSHRemotePortForwarding, - config.DisableSSHAuth, - ) - loginResp, err := client.Register(serverPublicKey, validSetupKey.String(), jwtToken, info, pubSSHKey, config.DNSLabels) - if err != nil { - log.Errorf("failed registering peer %v", err) - return nil, err - } - - log.Infof("peer has been successfully registered on Management Service") - - return loginResp, nil -} - -func isLoginNeeded(err error) bool { - if err == nil { - return false - } - s, ok := status.FromError(err) - if !ok { - return false - } - if s.Code() == codes.InvalidArgument || s.Code() == codes.PermissionDenied { - return true - } - return false -} - -func isRegistrationNeeded(err error) bool { - if err == nil { - return false - } - s, ok := status.FromError(err) - if !ok { - return false - } - if s.Code() == codes.PermissionDenied { - return true - } - return false -} diff --git a/client/internal/peer/conn.go b/client/internal/peer/conn.go index 80ca36789..39133a6d3 100644 --- a/client/internal/peer/conn.go +++ b/client/internal/peer/conn.go @@ -88,8 +88,9 @@ type Conn struct { relayManager *relayClient.Manager srWatcher *guard.SRWatcher - onConnected func(remoteWireGuardKey string, remoteRosenpassPubKey []byte, wireGuardIP string, remoteRosenpassAddr string) - onDisconnected func(remotePeer string) + onConnected func(remoteWireGuardKey string, remoteRosenpassPubKey []byte, wireGuardIP string, remoteRosenpassAddr string) + onDisconnected func(remotePeer string) + rosenpassInitializedPresharedKeyValidator func(peerKey string) bool statusRelay *worker.AtomicWorkerStatus statusICE *worker.AtomicWorkerStatus @@ -98,7 +99,10 @@ type Conn struct { workerICE *WorkerICE workerRelay *WorkerRelay - wgWatcherWg sync.WaitGroup + + wgWatcher *WGWatcher + wgWatcherWg sync.WaitGroup + wgWatcherCancel context.CancelFunc // used to store the remote Rosenpass key for Relayed connection in case of connection update from ice rosenpassRemoteKey []byte @@ -126,6 +130,7 @@ func NewConn(config ConnConfig, services ServiceDependencies) (*Conn, error) { connLog := log.WithField("peer", config.Key) + dumpState := newStateDump(config.Key, connLog, services.StatusRecorder) var conn = &Conn{ Log: connLog, config: config, @@ -137,8 +142,9 @@ func NewConn(config ConnConfig, services ServiceDependencies) (*Conn, error) { semaphore: services.Semaphore, statusRelay: worker.NewAtomicStatus(), statusICE: worker.NewAtomicStatus(), - dumpState: newStateDump(config.Key, connLog, services.StatusRecorder), + dumpState: dumpState, endpointUpdater: NewEndpointUpdater(connLog, config.WgConfig, isController(config)), + wgWatcher: NewWGWatcher(connLog, config.WgConfig.WgInterface, config.Key, dumpState), } return conn, nil @@ -162,7 +168,7 @@ func (conn *Conn) Open(engineCtx context.Context) error { conn.ctx, conn.ctxCancel = context.WithCancel(engineCtx) - conn.workerRelay = NewWorkerRelay(conn.ctx, conn.Log, isController(conn.config), conn.config, conn, conn.relayManager, conn.dumpState) + conn.workerRelay = NewWorkerRelay(conn.ctx, conn.Log, isController(conn.config), conn.config, conn, conn.relayManager) relayIsSupportedLocally := conn.workerRelay.RelayIsSupportedLocally() workerICE, err := NewWorkerICE(conn.ctx, conn.Log, conn.config, conn, conn.signaler, conn.iFaceDiscover, conn.statusRecorder, relayIsSupportedLocally) @@ -231,7 +237,9 @@ func (conn *Conn) Close(signalToRemote bool) { conn.Log.Infof("close peer connection") conn.ctxCancel() - conn.workerRelay.DisableWgWatcher() + if conn.wgWatcherCancel != nil { + conn.wgWatcherCancel() + } conn.workerRelay.CloseConn() conn.workerICE.Close() @@ -289,6 +297,13 @@ func (conn *Conn) SetOnDisconnected(handler func(remotePeer string)) { conn.onDisconnected = handler } +// SetRosenpassInitializedPresharedKeyValidator sets a function to check if Rosenpass has taken over +// PSK management for a peer. When this returns true, presharedKey() returns nil +// to prevent UpdatePeer from overwriting the Rosenpass-managed PSK. +func (conn *Conn) SetRosenpassInitializedPresharedKeyValidator(handler func(peerKey string) bool) { + conn.rosenpassInitializedPresharedKeyValidator = handler +} + func (conn *Conn) OnRemoteOffer(offer OfferAnswer) { conn.dumpState.RemoteOffer() conn.Log.Infof("OnRemoteOffer, on status ICE: %s, status Relay: %s", conn.statusICE, conn.statusRelay) @@ -366,9 +381,6 @@ func (conn *Conn) onICEConnectionIsReady(priority conntype.ConnPriority, iceConn ep = directEp } - conn.workerRelay.DisableWgWatcher() - // todo consider to run conn.wgWatcherWg.Wait() here - if conn.wgProxyRelay != nil { conn.wgProxyRelay.Pause() } @@ -390,6 +402,8 @@ func (conn *Conn) onICEConnectionIsReady(priority conntype.ConnPriority, iceConn conn.wgProxyRelay.RedirectAs(ep) } + conn.enableWgWatcherIfNeeded() + conn.currentConnPriority = priority conn.statusICE.SetConnected() conn.updateIceState(iceConnInfo) @@ -423,11 +437,6 @@ func (conn *Conn) onICEStateDisconnected() { conn.Log.Errorf("failed to switch to relay conn: %v", err) } - conn.wgWatcherWg.Add(1) - go func() { - defer conn.wgWatcherWg.Done() - conn.workerRelay.EnableWgWatcher(conn.ctx) - }() conn.wgProxyRelay.Work() conn.currentConnPriority = conntype.Relay } else { @@ -444,15 +453,15 @@ func (conn *Conn) onICEStateDisconnected() { } conn.statusICE.SetDisconnected() + conn.disableWgWatcherIfNeeded() + peerState := State{ PubKey: conn.config.Key, ConnStatus: conn.evalStatus(), Relayed: conn.isRelayed(), ConnStatusUpdate: time.Now(), } - - err := conn.statusRecorder.UpdatePeerICEStateToDisconnected(peerState) - if err != nil { + if err := conn.statusRecorder.UpdatePeerICEStateToDisconnected(peerState); err != nil { conn.Log.Warnf("unable to set peer's state to disconnected ice, got error: %v", err) } } @@ -500,11 +509,7 @@ func (conn *Conn) onRelayConnectionIsReady(rci RelayConnInfo) { return } - conn.wgWatcherWg.Add(1) - go func() { - defer conn.wgWatcherWg.Done() - conn.workerRelay.EnableWgWatcher(conn.ctx) - }() + conn.enableWgWatcherIfNeeded() wgConfigWorkaround() conn.rosenpassRemoteKey = rci.rosenpassPubKey @@ -519,7 +524,11 @@ func (conn *Conn) onRelayConnectionIsReady(rci RelayConnInfo) { func (conn *Conn) onRelayDisconnected() { conn.mu.Lock() defer conn.mu.Unlock() + conn.handleRelayDisconnectedLocked() +} +// handleRelayDisconnectedLocked handles relay disconnection. Caller must hold conn.mu. +func (conn *Conn) handleRelayDisconnectedLocked() { if conn.ctx.Err() != nil { return } @@ -545,6 +554,8 @@ func (conn *Conn) onRelayDisconnected() { } conn.statusRelay.SetDisconnected() + conn.disableWgWatcherIfNeeded() + peerState := State{ PubKey: conn.config.Key, ConnStatus: conn.evalStatus(), @@ -563,6 +574,28 @@ func (conn *Conn) onGuardEvent() { } } +func (conn *Conn) onWGDisconnected() { + conn.mu.Lock() + defer conn.mu.Unlock() + + if conn.ctx.Err() != nil { + return + } + + conn.Log.Warnf("WireGuard handshake timeout detected, closing current connection") + + // Close the active connection based on current priority + switch conn.currentConnPriority { + case conntype.Relay: + conn.workerRelay.CloseConn() + conn.handleRelayDisconnectedLocked() + case conntype.ICEP2P, conntype.ICETurn: + conn.workerICE.Close() + default: + conn.Log.Debugf("No active connection to close on WG timeout") + } +} + func (conn *Conn) updateRelayStatus(relayServerAddr string, rosenpassPubKey []byte) { peerState := State{ PubKey: conn.config.Key, @@ -689,6 +722,25 @@ func (conn *Conn) isConnectedOnAllWay() (connected bool) { return true } +func (conn *Conn) enableWgWatcherIfNeeded() { + if !conn.wgWatcher.IsEnabled() { + wgWatcherCtx, wgWatcherCancel := context.WithCancel(conn.ctx) + conn.wgWatcherCancel = wgWatcherCancel + conn.wgWatcherWg.Add(1) + go func() { + defer conn.wgWatcherWg.Done() + conn.wgWatcher.EnableWgWatcher(wgWatcherCtx, conn.onWGDisconnected) + }() + } +} + +func (conn *Conn) disableWgWatcherIfNeeded() { + if conn.currentConnPriority == conntype.None && conn.wgWatcherCancel != nil { + conn.wgWatcherCancel() + conn.wgWatcherCancel = nil + } +} + func (conn *Conn) newProxy(remoteConn net.Conn) (wgproxy.Proxy, error) { conn.Log.Debugf("setup proxied WireGuard connection") udpAddr := &net.UDPAddr{ @@ -759,10 +811,24 @@ func (conn *Conn) presharedKey(remoteRosenpassKey []byte) *wgtypes.Key { return conn.config.WgConfig.PreSharedKey } + // If Rosenpass has already set a PSK for this peer, return nil to prevent + // UpdatePeer from overwriting the Rosenpass-managed key. + if conn.rosenpassInitializedPresharedKeyValidator != nil && conn.rosenpassInitializedPresharedKeyValidator(conn.config.Key) { + return nil + } + + // Use NetBird PSK as the seed for Rosenpass. This same PSK is passed to + // Rosenpass as PeerConfig.PresharedKey, ensuring the derived post-quantum + // key is cryptographically bound to the original secret. + if conn.config.WgConfig.PreSharedKey != nil { + return conn.config.WgConfig.PreSharedKey + } + + // Fallback to deterministic key if no NetBird PSK is configured determKey, err := conn.rosenpassDetermKey() if err != nil { conn.Log.Errorf("failed to generate Rosenpass initial key: %v", err) - return conn.config.WgConfig.PreSharedKey + return nil } return determKey diff --git a/client/internal/peer/conn_test.go b/client/internal/peer/conn_test.go index 6b47f95eb..32383b530 100644 --- a/client/internal/peer/conn_test.go +++ b/client/internal/peer/conn_test.go @@ -284,3 +284,27 @@ func TestConn_presharedKey(t *testing.T) { }) } } + +func TestConn_presharedKey_RosenpassManaged(t *testing.T) { + conn := Conn{ + config: ConnConfig{ + Key: "LLHf3Ma6z6mdLbriAJbqhX7+nM/B71lgw2+91q3LfhU=", + LocalKey: "RRHf3Ma6z6mdLbriAJbqhX7+nM/B71lgw2+91q3LfhU=", + RosenpassConfig: RosenpassConfig{PubKey: []byte("dummykey")}, + }, + } + + // When Rosenpass has already initialized the PSK for this peer, + // presharedKey must return nil to avoid UpdatePeer overwriting it. + conn.rosenpassInitializedPresharedKeyValidator = func(peerKey string) bool { return true } + if k := conn.presharedKey([]byte("remote")); k != nil { + t.Fatalf("expected nil presharedKey when Rosenpass manages PSK, got %v", k) + } + + // When Rosenpass hasn't taken over yet, presharedKey should provide + // a non-nil initial key (deterministic or from NetBird PSK). + conn.rosenpassInitializedPresharedKeyValidator = func(peerKey string) bool { return false } + if k := conn.presharedKey([]byte("remote")); k == nil { + t.Fatalf("expected non-nil presharedKey before Rosenpass manages PSK") + } +} diff --git a/client/internal/peer/status.go b/client/internal/peer/status.go index 697bda2ff..abedc208e 100644 --- a/client/internal/peer/status.go +++ b/client/internal/peer/status.go @@ -1145,6 +1145,38 @@ func (d *Status) PeersStatus() (*configurer.Stats, error) { return d.wgIface.FullStats() } +// RefreshWireGuardStats fetches fresh WireGuard statistics from the interface +// and updates the cached peer states. This ensures accurate handshake times and +// transfer statistics in status reports without running full health probes. +func (d *Status) RefreshWireGuardStats() error { + d.mux.Lock() + defer d.mux.Unlock() + + if d.wgIface == nil { + return nil // silently skip if interface not set + } + + stats, err := d.wgIface.FullStats() + if err != nil { + return fmt.Errorf("get wireguard stats: %w", err) + } + + // Update each peer's WireGuard statistics + for _, peerStats := range stats.Peers { + peerState, ok := d.peers[peerStats.PublicKey] + if !ok { + continue + } + + peerState.LastWireguardHandshake = peerStats.LastHandshake + peerState.BytesRx = peerStats.RxBytes + peerState.BytesTx = peerStats.TxBytes + d.peers[peerStats.PublicKey] = peerState + } + + return nil +} + type EventQueue struct { maxSize int events []*proto.SystemEvent diff --git a/client/internal/peer/wg_watcher.go b/client/internal/peer/wg_watcher.go index 0ed200fda..d40ec7a80 100644 --- a/client/internal/peer/wg_watcher.go +++ b/client/internal/peer/wg_watcher.go @@ -30,10 +30,8 @@ type WGWatcher struct { peerKey string stateDump *stateDump - ctx context.Context - ctxCancel context.CancelFunc - ctxLock sync.Mutex - enabledTime time.Time + enabled bool + muEnabled sync.RWMutex } func NewWGWatcher(log *log.Entry, wgIfaceStater WGInterfaceStater, peerKey string, stateDump *stateDump) *WGWatcher { @@ -46,52 +44,44 @@ func NewWGWatcher(log *log.Entry, wgIfaceStater WGInterfaceStater, peerKey strin } // EnableWgWatcher starts the WireGuard watcher. If it is already enabled, it will return immediately and do nothing. -func (w *WGWatcher) EnableWgWatcher(parentCtx context.Context, onDisconnectedFn func()) { - w.log.Debugf("enable WireGuard watcher") - w.ctxLock.Lock() - w.enabledTime = time.Now() - - if w.ctx != nil && w.ctx.Err() == nil { - w.log.Errorf("WireGuard watcher already enabled") - w.ctxLock.Unlock() +// The watcher runs until ctx is cancelled. Caller is responsible for context lifecycle management. +func (w *WGWatcher) EnableWgWatcher(ctx context.Context, onDisconnectedFn func()) { + w.muEnabled.Lock() + if w.enabled { + w.muEnabled.Unlock() return } - ctx, ctxCancel := context.WithCancel(parentCtx) - w.ctx = ctx - w.ctxCancel = ctxCancel - w.ctxLock.Unlock() + w.log.Debugf("enable WireGuard watcher") + enabledTime := time.Now() + w.enabled = true + w.muEnabled.Unlock() initialHandshake, err := w.wgState() if err != nil { w.log.Warnf("failed to read initial wg stats: %v", err) } - w.periodicHandshakeCheck(ctx, ctxCancel, onDisconnectedFn, initialHandshake) + w.periodicHandshakeCheck(ctx, onDisconnectedFn, enabledTime, initialHandshake) + + w.muEnabled.Lock() + w.enabled = false + w.muEnabled.Unlock() } -// DisableWgWatcher stops the WireGuard watcher and wait for the watcher to exit -func (w *WGWatcher) DisableWgWatcher() { - w.ctxLock.Lock() - defer w.ctxLock.Unlock() - - if w.ctxCancel == nil { - return - } - - w.log.Debugf("disable WireGuard watcher") - - w.ctxCancel() - w.ctxCancel = nil +// IsEnabled returns true if the WireGuard watcher is currently enabled +func (w *WGWatcher) IsEnabled() bool { + w.muEnabled.RLock() + defer w.muEnabled.RUnlock() + return w.enabled } // wgStateCheck help to check the state of the WireGuard handshake and relay connection -func (w *WGWatcher) periodicHandshakeCheck(ctx context.Context, ctxCancel context.CancelFunc, onDisconnectedFn func(), initialHandshake time.Time) { +func (w *WGWatcher) periodicHandshakeCheck(ctx context.Context, onDisconnectedFn func(), enabledTime time.Time, initialHandshake time.Time) { w.log.Infof("WireGuard watcher started") timer := time.NewTimer(wgHandshakeOvertime) defer timer.Stop() - defer ctxCancel() lastHandshake := initialHandshake @@ -104,7 +94,7 @@ func (w *WGWatcher) periodicHandshakeCheck(ctx context.Context, ctxCancel contex return } if lastHandshake.IsZero() { - elapsed := handshake.Sub(w.enabledTime).Seconds() + elapsed := calcElapsed(enabledTime, *handshake) w.log.Infof("first wg handshake detected within: %.2fsec, (%s)", elapsed, handshake) } @@ -134,19 +124,19 @@ func (w *WGWatcher) handshakeCheck(lastHandshake time.Time) (*time.Time, bool) { // the current know handshake did not change if handshake.Equal(lastHandshake) { - w.log.Warnf("WireGuard handshake timed out, closing relay connection: %v", handshake) + w.log.Warnf("WireGuard handshake timed out: %v", handshake) return nil, false } // in case if the machine is suspended, the handshake time will be in the past if handshake.Add(checkPeriod).Before(time.Now()) { - w.log.Warnf("WireGuard handshake timed out, closing relay connection: %v", handshake) + w.log.Warnf("WireGuard handshake timed out: %v", handshake) return nil, false } // error handling for handshake time in the future if handshake.After(time.Now()) { - w.log.Warnf("WireGuard handshake is in the future, closing relay connection: %v", handshake) + w.log.Warnf("WireGuard handshake is in the future: %v", handshake) return nil, false } @@ -164,3 +154,13 @@ func (w *WGWatcher) wgState() (time.Time, error) { } return wgState.LastHandshake, nil } + +// calcElapsed calculates elapsed time since watcher was enabled. +// The watcher started after the wg configuration happens, because of this need to normalise the negative value +func calcElapsed(enabledTime, handshake time.Time) float64 { + elapsed := handshake.Sub(enabledTime).Seconds() + if elapsed < 0 { + elapsed = 0 + } + return elapsed +} diff --git a/client/internal/peer/wg_watcher_test.go b/client/internal/peer/wg_watcher_test.go index d7c277eff..f79405a01 100644 --- a/client/internal/peer/wg_watcher_test.go +++ b/client/internal/peer/wg_watcher_test.go @@ -2,6 +2,7 @@ package peer import ( "context" + "sync" "testing" "time" @@ -48,7 +49,6 @@ func TestWGWatcher_EnableWgWatcher(t *testing.T) { case <-time.After(10 * time.Second): t.Errorf("timeout") } - watcher.DisableWgWatcher() } func TestWGWatcher_ReEnable(t *testing.T) { @@ -60,14 +60,21 @@ func TestWGWatcher_ReEnable(t *testing.T) { watcher := NewWGWatcher(mlog, mocWgIface, "", newStateDump("peer", mlog, &Status{})) ctx, cancel := context.WithCancel(context.Background()) + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + defer wg.Done() + watcher.EnableWgWatcher(ctx, func() {}) + }() + cancel() + + wg.Wait() + + // Re-enable with a new context + ctx, cancel = context.WithCancel(context.Background()) defer cancel() onDisconnected := make(chan struct{}, 1) - - go watcher.EnableWgWatcher(ctx, func() {}) - time.Sleep(1 * time.Second) - watcher.DisableWgWatcher() - go watcher.EnableWgWatcher(ctx, func() { onDisconnected <- struct{}{} }) @@ -80,5 +87,4 @@ func TestWGWatcher_ReEnable(t *testing.T) { case <-time.After(10 * time.Second): t.Errorf("timeout") } - watcher.DisableWgWatcher() } diff --git a/client/internal/peer/worker_ice.go b/client/internal/peer/worker_ice.go index 840fc9241..b6b9d2cf4 100644 --- a/client/internal/peer/worker_ice.go +++ b/client/internal/peer/worker_ice.go @@ -5,6 +5,7 @@ import ( "fmt" "net" "net/netip" + "strconv" "sync" "time" @@ -286,8 +287,8 @@ func (w *WorkerICE) connect(ctx context.Context, agent *icemaker.ThreadSafeAgent RosenpassAddr: remoteOfferAnswer.RosenpassAddr, LocalIceCandidateType: pair.Local.Type().String(), RemoteIceCandidateType: pair.Remote.Type().String(), - LocalIceCandidateEndpoint: fmt.Sprintf("%s:%d", pair.Local.Address(), pair.Local.Port()), - RemoteIceCandidateEndpoint: fmt.Sprintf("%s:%d", pair.Remote.Address(), pair.Remote.Port()), + LocalIceCandidateEndpoint: net.JoinHostPort(pair.Local.Address(), strconv.Itoa(pair.Local.Port())), + RemoteIceCandidateEndpoint: net.JoinHostPort(pair.Remote.Address(), strconv.Itoa(pair.Remote.Port())), Relayed: isRelayed(pair), RelayedOnLocal: isRelayCandidate(pair.Local), } @@ -328,13 +329,7 @@ func (w *WorkerICE) closeAgent(agent *icemaker.ThreadSafeAgent, cancel context.C func (w *WorkerICE) punchRemoteWGPort(pair *ice.CandidatePair, remoteWgPort int) { // wait local endpoint configuration time.Sleep(time.Second) - addrString := pair.Remote.Address() - parsed, err := netip.ParseAddr(addrString) - if (err == nil) && (parsed.Is6()) { - addrString = fmt.Sprintf("[%s]", addrString) - //IPv6 Literals need to be wrapped in brackets for Resolve*Addr() - } - addr, err := net.ResolveUDPAddr("udp", fmt.Sprintf("%s:%d", addrString, remoteWgPort)) + addr, err := net.ResolveUDPAddr("udp", net.JoinHostPort(pair.Remote.Address(), strconv.Itoa(remoteWgPort))) if err != nil { w.log.Warnf("got an error while resolving the udp address, err: %s", err) return @@ -386,12 +381,44 @@ func (w *WorkerICE) onICESelectedCandidatePair(agent *icemaker.ThreadSafeAgent, } } +func (w *WorkerICE) logSuccessfulPaths(agent *icemaker.ThreadSafeAgent) { + sessionID := w.SessionID() + stats := agent.GetCandidatePairsStats() + localCandidates, _ := agent.GetLocalCandidates() + remoteCandidates, _ := agent.GetRemoteCandidates() + + localMap := make(map[string]ice.Candidate) + for _, c := range localCandidates { + localMap[c.ID()] = c + } + remoteMap := make(map[string]ice.Candidate) + for _, c := range remoteCandidates { + remoteMap[c.ID()] = c + } + + for _, stat := range stats { + if stat.State == ice.CandidatePairStateSucceeded { + local, lok := localMap[stat.LocalCandidateID] + remote, rok := remoteMap[stat.RemoteCandidateID] + if !lok || !rok { + continue + } + w.log.Debugf("successful ICE path %s: [%s %s %s] <-> [%s %s %s] rtt=%.3fms", + sessionID, + local.NetworkType(), local.Type(), local.Address(), + remote.NetworkType(), remote.Type(), remote.Address(), + stat.CurrentRoundTripTime*1000) + } + } +} + func (w *WorkerICE) onConnectionStateChange(agent *icemaker.ThreadSafeAgent, dialerCancel context.CancelFunc) func(ice.ConnectionState) { return func(state ice.ConnectionState) { w.log.Debugf("ICE ConnectionState has changed to %s", state.String()) switch state { case ice.ConnectionStateConnected: w.lastKnownState = ice.ConnectionStateConnected + w.logSuccessfulPaths(agent) return case ice.ConnectionStateFailed, ice.ConnectionStateDisconnected, ice.ConnectionStateClosed: // ice.ConnectionStateClosed happens when we recreate the agent. For the P2P to TURN switch important to diff --git a/client/internal/peer/worker_relay.go b/client/internal/peer/worker_relay.go index f584487f5..06309fbaf 100644 --- a/client/internal/peer/worker_relay.go +++ b/client/internal/peer/worker_relay.go @@ -30,11 +30,9 @@ type WorkerRelay struct { relayLock sync.Mutex relaySupportedOnRemotePeer atomic.Bool - - wgWatcher *WGWatcher } -func NewWorkerRelay(ctx context.Context, log *log.Entry, ctrl bool, config ConnConfig, conn *Conn, relayManager *relayClient.Manager, stateDump *stateDump) *WorkerRelay { +func NewWorkerRelay(ctx context.Context, log *log.Entry, ctrl bool, config ConnConfig, conn *Conn, relayManager *relayClient.Manager) *WorkerRelay { r := &WorkerRelay{ peerCtx: ctx, log: log, @@ -42,7 +40,6 @@ func NewWorkerRelay(ctx context.Context, log *log.Entry, ctrl bool, config ConnC config: config, conn: conn, relayManager: relayManager, - wgWatcher: NewWGWatcher(log, config.WgConfig.WgInterface, config.Key, stateDump), } return r } @@ -93,14 +90,6 @@ func (w *WorkerRelay) OnNewOffer(remoteOfferAnswer *OfferAnswer) { }) } -func (w *WorkerRelay) EnableWgWatcher(ctx context.Context) { - w.wgWatcher.EnableWgWatcher(ctx, w.onWGDisconnected) -} - -func (w *WorkerRelay) DisableWgWatcher() { - w.wgWatcher.DisableWgWatcher() -} - func (w *WorkerRelay) RelayInstanceAddress() (string, error) { return w.relayManager.RelayInstanceAddress() } @@ -125,14 +114,6 @@ func (w *WorkerRelay) CloseConn() { } } -func (w *WorkerRelay) onWGDisconnected() { - w.relayLock.Lock() - _ = w.relayedConn.Close() - w.relayLock.Unlock() - - w.conn.onRelayDisconnected() -} - func (w *WorkerRelay) isRelaySupported(answer *OfferAnswer) bool { if !w.relayManager.HasRelayAddress() { return false @@ -148,6 +129,5 @@ func (w *WorkerRelay) preferredRelayServer(myRelayAddress, remoteRelayAddress st } func (w *WorkerRelay) onRelayClientDisconnected() { - w.wgWatcher.DisableWgWatcher() go w.conn.onRelayDisconnected() } diff --git a/client/internal/pkce_auth.go b/client/internal/pkce_auth.go deleted file mode 100644 index 23c92e8af..000000000 --- a/client/internal/pkce_auth.go +++ /dev/null @@ -1,138 +0,0 @@ -package internal - -import ( - "context" - "crypto/tls" - "fmt" - "net/url" - - log "github.com/sirupsen/logrus" - "golang.zx2c4.com/wireguard/wgctrl/wgtypes" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - mgm "github.com/netbirdio/netbird/shared/management/client" - "github.com/netbirdio/netbird/shared/management/client/common" -) - -// PKCEAuthorizationFlow represents PKCE Authorization Flow information -type PKCEAuthorizationFlow struct { - ProviderConfig PKCEAuthProviderConfig -} - -// PKCEAuthProviderConfig has all attributes needed to initiate pkce authorization flow -type PKCEAuthProviderConfig struct { - // ClientID An IDP application client id - ClientID string - // ClientSecret An IDP application client secret - ClientSecret string - // Audience An Audience for to authorization validation - Audience string - // TokenEndpoint is the endpoint of an IDP manager where clients can obtain access token - TokenEndpoint string - // AuthorizationEndpoint is the endpoint of an IDP manager where clients can obtain authorization code - AuthorizationEndpoint string - // Scopes provides the scopes to be included in the token request - Scope string - // RedirectURL handles authorization code from IDP manager - RedirectURLs []string - // UseIDToken indicates if the id token should be used for authentication - UseIDToken bool - // ClientCertPair is used for mTLS authentication to the IDP - ClientCertPair *tls.Certificate - // DisablePromptLogin makes the PKCE flow to not prompt the user for login - DisablePromptLogin bool - // LoginFlag is used to configure the PKCE flow login behavior - LoginFlag common.LoginFlag - // LoginHint is used to pre-fill the email/username field during authentication - LoginHint string -} - -// GetPKCEAuthorizationFlowInfo initialize a PKCEAuthorizationFlow instance and return with it -func GetPKCEAuthorizationFlowInfo(ctx context.Context, privateKey string, mgmURL *url.URL, clientCert *tls.Certificate) (PKCEAuthorizationFlow, error) { - // validate our peer's Wireguard PRIVATE key - myPrivateKey, err := wgtypes.ParseKey(privateKey) - if err != nil { - log.Errorf("failed parsing Wireguard key %s: [%s]", privateKey, err.Error()) - return PKCEAuthorizationFlow{}, err - } - - var mgmTLSEnabled bool - if mgmURL.Scheme == "https" { - mgmTLSEnabled = true - } - - log.Debugf("connecting to Management Service %s", mgmURL.String()) - mgmClient, err := mgm.NewClient(ctx, mgmURL.Host, myPrivateKey, mgmTLSEnabled) - if err != nil { - log.Errorf("failed connecting to Management Service %s %v", mgmURL.String(), err) - return PKCEAuthorizationFlow{}, err - } - log.Debugf("connected to the Management service %s", mgmURL.String()) - - defer func() { - err = mgmClient.Close() - if err != nil { - log.Warnf("failed to close the Management service client %v", err) - } - }() - - serverKey, err := mgmClient.GetServerPublicKey() - if err != nil { - log.Errorf("failed while getting Management Service public key: %v", err) - return PKCEAuthorizationFlow{}, err - } - - protoPKCEAuthorizationFlow, err := mgmClient.GetPKCEAuthorizationFlow(*serverKey) - if err != nil { - if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { - log.Warnf("server couldn't find pkce flow, contact admin: %v", err) - return PKCEAuthorizationFlow{}, err - } - log.Errorf("failed to retrieve pkce flow: %v", err) - return PKCEAuthorizationFlow{}, err - } - - authFlow := PKCEAuthorizationFlow{ - ProviderConfig: PKCEAuthProviderConfig{ - Audience: protoPKCEAuthorizationFlow.GetProviderConfig().GetAudience(), - ClientID: protoPKCEAuthorizationFlow.GetProviderConfig().GetClientID(), - ClientSecret: protoPKCEAuthorizationFlow.GetProviderConfig().GetClientSecret(), - TokenEndpoint: protoPKCEAuthorizationFlow.GetProviderConfig().GetTokenEndpoint(), - AuthorizationEndpoint: protoPKCEAuthorizationFlow.GetProviderConfig().GetAuthorizationEndpoint(), - Scope: protoPKCEAuthorizationFlow.GetProviderConfig().GetScope(), - RedirectURLs: protoPKCEAuthorizationFlow.GetProviderConfig().GetRedirectURLs(), - UseIDToken: protoPKCEAuthorizationFlow.GetProviderConfig().GetUseIDToken(), - ClientCertPair: clientCert, - DisablePromptLogin: protoPKCEAuthorizationFlow.GetProviderConfig().GetDisablePromptLogin(), - LoginFlag: common.LoginFlag(protoPKCEAuthorizationFlow.GetProviderConfig().GetLoginFlag()), - }, - } - - err = isPKCEProviderConfigValid(authFlow.ProviderConfig) - if err != nil { - return PKCEAuthorizationFlow{}, err - } - - return authFlow, nil -} - -func isPKCEProviderConfigValid(config PKCEAuthProviderConfig) error { - errorMSGFormat := "invalid provider configuration received from management: %s value is empty. Contact your NetBird administrator" - if config.ClientID == "" { - return fmt.Errorf(errorMSGFormat, "Client ID") - } - if config.TokenEndpoint == "" { - return fmt.Errorf(errorMSGFormat, "Token Endpoint") - } - if config.AuthorizationEndpoint == "" { - return fmt.Errorf(errorMSGFormat, "Authorization Auth Endpoint") - } - if config.Scope == "" { - return fmt.Errorf(errorMSGFormat, "PKCE Auth Scopes") - } - if config.RedirectURLs == nil { - return fmt.Errorf(errorMSGFormat, "PKCE Redirect URLs") - } - return nil -} diff --git a/client/internal/rosenpass/manager.go b/client/internal/rosenpass/manager.go index d2d7408fd..1faa22dc5 100644 --- a/client/internal/rosenpass/manager.go +++ b/client/internal/rosenpass/manager.go @@ -17,6 +17,11 @@ import ( "golang.zx2c4.com/wireguard/wgctrl/wgtypes" ) +const ( + defaultLog = slog.LevelInfo + defaultLogLevelVar = "NB_ROSENPASS_LOG_LEVEL" +) + func hashRosenpassKey(key []byte) string { hasher := sha256.New() hasher.Write(key) @@ -34,6 +39,7 @@ type Manager struct { server *rp.Server lock sync.Mutex port int + wgIface PresharedKeySetter } // NewManager creates a new Rosenpass manager @@ -44,7 +50,7 @@ func NewManager(preSharedKey *wgtypes.Key, wgIfaceName string) (*Manager, error) } rpKeyHash := hashRosenpassKey(public) - log.Debugf("generated new rosenpass key pair with public key %s", rpKeyHash) + log.Tracef("generated new rosenpass key pair with public key %s", rpKeyHash) return &Manager{ifaceName: wgIfaceName, rpKeyHash: rpKeyHash, spk: public, ssk: secret, preSharedKey: (*[32]byte)(preSharedKey), rpPeerIDs: make(map[string]*rp.PeerID), lock: sync.Mutex{}}, nil } @@ -100,7 +106,7 @@ func (m *Manager) removePeer(wireGuardPubKey string) error { func (m *Manager) generateConfig() (rp.Config, error) { opts := &slog.HandlerOptions{ - Level: slog.LevelDebug, + Level: getLogLevel(), } logger := slog.New(slog.NewTextHandler(os.Stdout, opts)) cfg := rp.Config{Logger: logger} @@ -109,7 +115,13 @@ func (m *Manager) generateConfig() (rp.Config, error) { cfg.SecretKey = m.ssk cfg.Peers = []rp.PeerConfig{} - m.rpWgHandler, _ = NewNetbirdHandler(m.preSharedKey, m.ifaceName) + + m.lock.Lock() + m.rpWgHandler = NewNetbirdHandler() + if m.wgIface != nil { + m.rpWgHandler.SetInterface(m.wgIface) + } + m.lock.Unlock() cfg.Handlers = []rp.Handler{m.rpWgHandler} @@ -126,6 +138,26 @@ func (m *Manager) generateConfig() (rp.Config, error) { return cfg, nil } +func getLogLevel() slog.Level { + level, ok := os.LookupEnv(defaultLogLevelVar) + if !ok { + return defaultLog + } + switch strings.ToLower(level) { + case "debug": + return slog.LevelDebug + case "info": + return slog.LevelInfo + case "warn": + return slog.LevelWarn + case "error": + return slog.LevelError + default: + log.Warnf("unknown log level: %s. Using default %s", level, defaultLog.String()) + return defaultLog + } +} + func (m *Manager) OnDisconnected(peerKey string) { m.lock.Lock() defer m.lock.Unlock() @@ -172,6 +204,20 @@ func (m *Manager) Close() error { return nil } +// SetInterface sets the WireGuard interface for the rosenpass handler. +// This can be called before or after Run() - the interface will be stored +// and passed to the handler when it's created or updated immediately if +// already running. +func (m *Manager) SetInterface(iface PresharedKeySetter) { + m.lock.Lock() + defer m.lock.Unlock() + + m.wgIface = iface + if m.rpWgHandler != nil { + m.rpWgHandler.SetInterface(iface) + } +} + // OnConnected is a handler function that is triggered when a connection to a remote peer establishes func (m *Manager) OnConnected(remoteWireGuardKey string, remoteRosenpassPubKey []byte, wireGuardIP string, remoteRosenpassAddr string) { m.lock.Lock() @@ -192,6 +238,20 @@ func (m *Manager) OnConnected(remoteWireGuardKey string, remoteRosenpassPubKey [ } } +// IsPresharedKeyInitialized returns true if Rosenpass has completed a handshake +// and set a PSK for the given WireGuard peer. +func (m *Manager) IsPresharedKeyInitialized(wireGuardPubKey string) bool { + m.lock.Lock() + defer m.lock.Unlock() + + peerID, ok := m.rpPeerIDs[wireGuardPubKey] + if !ok || peerID == nil { + return false + } + + return m.rpWgHandler.IsPeerInitialized(*peerID) +} + func findRandomAvailableUDPPort() (int, error) { conn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0}) if err != nil { diff --git a/client/internal/rosenpass/netbird_handler.go b/client/internal/rosenpass/netbird_handler.go index 345f95c01..9de2409ef 100644 --- a/client/internal/rosenpass/netbird_handler.go +++ b/client/internal/rosenpass/netbird_handler.go @@ -1,46 +1,50 @@ package rosenpass import ( - "fmt" - "log/slog" + "sync" rp "cunicu.li/go-rosenpass" log "github.com/sirupsen/logrus" - "golang.zx2c4.com/wireguard/wgctrl" "golang.zx2c4.com/wireguard/wgctrl/wgtypes" ) +// PresharedKeySetter is the interface for setting preshared keys on WireGuard peers. +// This minimal interface allows rosenpass to update PSKs without depending on the full WGIface. +type PresharedKeySetter interface { + SetPresharedKey(peerKey string, psk wgtypes.Key, updateOnly bool) error +} + type wireGuardPeer struct { Interface string PublicKey rp.Key } type NetbirdHandler struct { - ifaceName string - client *wgctrl.Client - peers map[rp.PeerID]wireGuardPeer - presharedKey [32]byte + mu sync.Mutex + iface PresharedKeySetter + peers map[rp.PeerID]wireGuardPeer + initializedPeers map[rp.PeerID]bool } -func NewNetbirdHandler(preSharedKey *[32]byte, wgIfaceName string) (hdlr *NetbirdHandler, err error) { - hdlr = &NetbirdHandler{ - ifaceName: wgIfaceName, - peers: map[rp.PeerID]wireGuardPeer{}, +func NewNetbirdHandler() *NetbirdHandler { + return &NetbirdHandler{ + peers: map[rp.PeerID]wireGuardPeer{}, + initializedPeers: map[rp.PeerID]bool{}, } +} - if preSharedKey != nil { - hdlr.presharedKey = *preSharedKey - } - - if hdlr.client, err = wgctrl.New(); err != nil { - return nil, fmt.Errorf("failed to creat WireGuard client: %w", err) - } - - return hdlr, nil +// SetInterface sets the WireGuard interface for the handler. +// This must be called after the WireGuard interface is created. +func (h *NetbirdHandler) SetInterface(iface PresharedKeySetter) { + h.mu.Lock() + defer h.mu.Unlock() + h.iface = iface } func (h *NetbirdHandler) AddPeer(pid rp.PeerID, intf string, pk rp.Key) { + h.mu.Lock() + defer h.mu.Unlock() h.peers[pid] = wireGuardPeer{ Interface: intf, PublicKey: pk, @@ -48,79 +52,61 @@ func (h *NetbirdHandler) AddPeer(pid rp.PeerID, intf string, pk rp.Key) { } func (h *NetbirdHandler) RemovePeer(pid rp.PeerID) { + h.mu.Lock() + defer h.mu.Unlock() delete(h.peers, pid) + delete(h.initializedPeers, pid) +} + +// IsPeerInitialized returns true if Rosenpass has completed a handshake +// and set a PSK for this peer. +func (h *NetbirdHandler) IsPeerInitialized(pid rp.PeerID) bool { + h.mu.Lock() + defer h.mu.Unlock() + return h.initializedPeers[pid] } func (h *NetbirdHandler) HandshakeCompleted(pid rp.PeerID, key rp.Key) { - log.Debug("Handshake complete") h.outputKey(rp.KeyOutputReasonStale, pid, key) } func (h *NetbirdHandler) HandshakeExpired(pid rp.PeerID) { key, _ := rp.GeneratePresharedKey() - log.Debug("Handshake expired") h.outputKey(rp.KeyOutputReasonStale, pid, key) } func (h *NetbirdHandler) outputKey(_ rp.KeyOutputReason, pid rp.PeerID, psk rp.Key) { + h.mu.Lock() + iface := h.iface wg, ok := h.peers[pid] + isInitialized := h.initializedPeers[pid] + h.mu.Unlock() + + if iface == nil { + log.Warn("rosenpass: interface not set, cannot update preshared key") + return + } + if !ok { return } - device, err := h.client.Device(h.ifaceName) - if err != nil { - log.Errorf("Failed to get WireGuard device: %v", err) + peerKey := wgtypes.Key(wg.PublicKey).String() + pskKey := wgtypes.Key(psk) + + // Use updateOnly=true for later rotations (peer already has Rosenpass PSK) + // Use updateOnly=false for first rotation (peer has original/empty PSK) + if err := iface.SetPresharedKey(peerKey, pskKey, isInitialized); err != nil { + log.Errorf("Failed to apply rosenpass key: %v", err) return } - config := []wgtypes.PeerConfig{ - { - UpdateOnly: true, - PublicKey: wgtypes.Key(wg.PublicKey), - PresharedKey: (*wgtypes.Key)(&psk), - }, - } - for _, peer := range device.Peers { - if peer.PublicKey == wgtypes.Key(wg.PublicKey) { - if publicKeyEmpty(peer.PresharedKey) || peer.PresharedKey == h.presharedKey { - log.Debugf("Restart wireguard connection to peer %s", peer.PublicKey) - config = []wgtypes.PeerConfig{ - { - PublicKey: wgtypes.Key(wg.PublicKey), - PresharedKey: (*wgtypes.Key)(&psk), - Endpoint: peer.Endpoint, - AllowedIPs: peer.AllowedIPs, - }, - } - err = h.client.ConfigureDevice(wg.Interface, wgtypes.Config{ - Peers: []wgtypes.PeerConfig{ - { - Remove: true, - PublicKey: wgtypes.Key(wg.PublicKey), - }, - }, - }) - if err != nil { - slog.Debug("Failed to remove peer") - return - } - } + // Mark peer as isInitialized after the successful first rotation + if !isInitialized { + h.mu.Lock() + if _, exists := h.peers[pid]; exists { + h.initializedPeers[pid] = true } - } - - if err = h.client.ConfigureDevice(wg.Interface, wgtypes.Config{ - Peers: config, - }); err != nil { - log.Errorf("Failed to apply rosenpass key: %v", err) + h.mu.Unlock() } } - -func publicKeyEmpty(key wgtypes.Key) bool { - for _, b := range key { - if b != 0 { - return false - } - } - return true -} diff --git a/client/ios/NetBirdSDK/client.go b/client/ios/NetBirdSDK/client.go index 935910fc9..aafef41d3 100644 --- a/client/ios/NetBirdSDK/client.go +++ b/client/ios/NetBirdSDK/client.go @@ -263,7 +263,14 @@ func (c *Client) IsLoginRequired() bool { return true } - needsLogin, err := internal.IsLoginRequired(ctx, cfg) + authClient, err := auth.NewAuth(ctx, cfg.PrivateKey, cfg.ManagementURL, cfg) + if err != nil { + log.Errorf("IsLoginRequired: failed to create auth client: %v", err) + return true // Assume login is required if we can't create auth client + } + defer authClient.Close() + + needsLogin, err := authClient.IsLoginRequired(ctx) if err != nil { log.Errorf("IsLoginRequired: check failed: %v", err) // If the check fails, assume login is required to be safe @@ -314,16 +321,19 @@ func (c *Client) LoginForMobile() string { // This could cause a potential race condition with loading the extension which need to be handled on swift side go func() { - waitTimeout := time.Duration(flowInfo.ExpiresIn) * time.Second - waitCTX, cancel := context.WithTimeout(ctx, waitTimeout) - defer cancel() - tokenInfo, err := oAuthFlow.WaitToken(waitCTX, flowInfo) + tokenInfo, err := oAuthFlow.WaitToken(ctx, flowInfo) if err != nil { log.Errorf("LoginForMobile: WaitToken failed: %v", err) return } jwtToken := tokenInfo.GetTokenToUse() - if err := internal.Login(ctx, cfg, "", jwtToken); err != nil { + authClient, err := auth.NewAuth(ctx, cfg.PrivateKey, cfg.ManagementURL, cfg) + if err != nil { + log.Errorf("LoginForMobile: failed to create auth client: %v", err) + return + } + defer authClient.Close() + if err, _ := authClient.Login(ctx, "", jwtToken); err != nil { log.Errorf("LoginForMobile: Login failed: %v", err) return } diff --git a/client/ios/NetBirdSDK/login.go b/client/ios/NetBirdSDK/login.go index 27fdcf5ef..9d447ef3f 100644 --- a/client/ios/NetBirdSDK/login.go +++ b/client/ios/NetBirdSDK/login.go @@ -7,13 +7,8 @@ import ( "fmt" "time" - "github.com/cenkalti/backoff/v4" log "github.com/sirupsen/logrus" - "google.golang.org/grpc/codes" - gstatus "google.golang.org/grpc/status" - "github.com/netbirdio/netbird/client/cmd" - "github.com/netbirdio/netbird/client/internal" "github.com/netbirdio/netbird/client/internal/auth" "github.com/netbirdio/netbird/client/internal/profilemanager" "github.com/netbirdio/netbird/client/system" @@ -90,34 +85,21 @@ func (a *Auth) SaveConfigIfSSOSupported(listener SSOListener) { } func (a *Auth) saveConfigIfSSOSupported() (bool, error) { - supportsSSO := true - err := a.withBackOff(a.ctx, func() (err error) { - _, err = internal.GetPKCEAuthorizationFlowInfo(a.ctx, a.config.PrivateKey, a.config.ManagementURL, nil) - if s, ok := gstatus.FromError(err); ok && (s.Code() == codes.NotFound || s.Code() == codes.Unimplemented) { - _, err = internal.GetDeviceAuthorizationFlowInfo(a.ctx, a.config.PrivateKey, a.config.ManagementURL) - s, ok := gstatus.FromError(err) - if !ok { - return err - } - if s.Code() == codes.NotFound || s.Code() == codes.Unimplemented { - supportsSSO = false - err = nil - } + authClient, err := auth.NewAuth(a.ctx, a.config.PrivateKey, a.config.ManagementURL, a.config) + if err != nil { + return false, fmt.Errorf("failed to create auth client: %v", err) + } + defer authClient.Close() - return err - } - - return err - }) + supportsSSO, err := authClient.IsSSOSupported(a.ctx) + if err != nil { + return false, fmt.Errorf("failed to check SSO support: %v", err) + } if !supportsSSO { return false, nil } - if err != nil { - return false, fmt.Errorf("backoff cycle failed: %v", err) - } - // Use DirectWriteOutConfig to avoid atomic file operations (temp file + rename) // which are blocked by the tvOS sandbox in App Group containers err = profilemanager.DirectWriteOutConfig(a.cfgPath, a.config) @@ -141,19 +123,17 @@ func (a *Auth) LoginWithSetupKeyAndSaveConfig(resultListener ErrListener, setupK } func (a *Auth) loginWithSetupKeyAndSaveConfig(setupKey string, deviceName string) error { + authClient, err := auth.NewAuth(a.ctx, a.config.PrivateKey, a.config.ManagementURL, a.config) + if err != nil { + return fmt.Errorf("failed to create auth client: %v", err) + } + defer authClient.Close() + //nolint ctxWithValues := context.WithValue(a.ctx, system.DeviceNameCtxKey, deviceName) - - err := a.withBackOff(a.ctx, func() error { - backoffErr := internal.Login(ctxWithValues, a.config, setupKey, "") - if s, ok := gstatus.FromError(backoffErr); ok && (s.Code() == codes.PermissionDenied) { - // we got an answer from management, exit backoff earlier - return backoff.Permanent(backoffErr) - } - return backoffErr - }) + err, _ = authClient.Login(ctxWithValues, setupKey, "") if err != nil { - return fmt.Errorf("backoff cycle failed: %v", err) + return fmt.Errorf("login failed: %v", err) } // Use DirectWriteOutConfig to avoid atomic file operations (temp file + rename) @@ -164,15 +144,16 @@ func (a *Auth) loginWithSetupKeyAndSaveConfig(setupKey string, deviceName string // LoginSync performs a synchronous login check without UI interaction // Used for background VPN connection where user should already be authenticated func (a *Auth) LoginSync() error { - var needsLogin bool + authClient, err := auth.NewAuth(a.ctx, a.config.PrivateKey, a.config.ManagementURL, a.config) + if err != nil { + return fmt.Errorf("failed to create auth client: %v", err) + } + defer authClient.Close() // check if we need to generate JWT token - err := a.withBackOff(a.ctx, func() (err error) { - needsLogin, err = internal.IsLoginRequired(a.ctx, a.config) - return - }) + needsLogin, err := authClient.IsLoginRequired(a.ctx) if err != nil { - return fmt.Errorf("backoff cycle failed: %v", err) + return fmt.Errorf("failed to check login requirement: %v", err) } jwtToken := "" @@ -180,15 +161,12 @@ func (a *Auth) LoginSync() error { return fmt.Errorf("not authenticated") } - err = a.withBackOff(a.ctx, func() error { - err := internal.Login(a.ctx, a.config, "", jwtToken) - if s, ok := gstatus.FromError(err); ok && (s.Code() == codes.PermissionDenied) { - // PermissionDenied means registration is required or peer is blocked - return backoff.Permanent(err) - } - return err - }) + err, isAuthError := authClient.Login(a.ctx, "", jwtToken) if err != nil { + if isAuthError { + // PermissionDenied means registration is required or peer is blocked + return fmt.Errorf("authentication error: %v", err) + } return fmt.Errorf("login failed: %v", err) } @@ -225,8 +203,6 @@ func (a *Auth) LoginWithDeviceName(resultListener ErrListener, urlOpener URLOpen } func (a *Auth) login(urlOpener URLOpener, forceDeviceAuth bool, deviceName string) error { - var needsLogin bool - // Create context with device name if provided ctx := a.ctx if deviceName != "" { @@ -234,33 +210,33 @@ func (a *Auth) login(urlOpener URLOpener, forceDeviceAuth bool, deviceName strin ctx = context.WithValue(a.ctx, system.DeviceNameCtxKey, deviceName) } - // check if we need to generate JWT token - err := a.withBackOff(ctx, func() (err error) { - needsLogin, err = internal.IsLoginRequired(ctx, a.config) - return - }) + authClient, err := auth.NewAuth(ctx, a.config.PrivateKey, a.config.ManagementURL, a.config) if err != nil { - return fmt.Errorf("backoff cycle failed: %v", err) + return fmt.Errorf("failed to create auth client: %v", err) + } + defer authClient.Close() + + // check if we need to generate JWT token + needsLogin, err := authClient.IsLoginRequired(ctx) + if err != nil { + return fmt.Errorf("failed to check login requirement: %v", err) } jwtToken := "" if needsLogin { - tokenInfo, err := a.foregroundGetTokenInfo(urlOpener, forceDeviceAuth) + tokenInfo, err := a.foregroundGetTokenInfo(authClient, urlOpener, forceDeviceAuth) if err != nil { return fmt.Errorf("interactive sso login failed: %v", err) } jwtToken = tokenInfo.GetTokenToUse() } - err = a.withBackOff(ctx, func() error { - err := internal.Login(ctx, a.config, "", jwtToken) - if s, ok := gstatus.FromError(err); ok && (s.Code() == codes.PermissionDenied) { - // PermissionDenied means registration is required or peer is blocked - return backoff.Permanent(err) - } - return err - }) + err, isAuthError := authClient.Login(ctx, "", jwtToken) if err != nil { + if isAuthError { + // PermissionDenied means registration is required or peer is blocked + return fmt.Errorf("authentication error: %v", err) + } return fmt.Errorf("login failed: %v", err) } @@ -285,10 +261,10 @@ func (a *Auth) login(urlOpener URLOpener, forceDeviceAuth bool, deviceName strin const authInfoRequestTimeout = 30 * time.Second -func (a *Auth) foregroundGetTokenInfo(urlOpener URLOpener, forceDeviceAuth bool) (*auth.TokenInfo, error) { - oAuthFlow, err := auth.NewOAuthFlow(a.ctx, a.config, false, forceDeviceAuth, "") +func (a *Auth) foregroundGetTokenInfo(authClient *auth.Auth, urlOpener URLOpener, forceDeviceAuth bool) (*auth.TokenInfo, error) { + oAuthFlow, err := authClient.GetOAuthFlow(a.ctx, forceDeviceAuth) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to get OAuth flow: %v", err) } // Use a bounded timeout for the auth info request to prevent indefinite hangs @@ -313,15 +289,6 @@ func (a *Auth) foregroundGetTokenInfo(urlOpener URLOpener, forceDeviceAuth bool) return &tokenInfo, nil } -func (a *Auth) withBackOff(ctx context.Context, bf func() error) error { - return backoff.RetryNotify( - bf, - backoff.WithContext(cmd.CLIBackOffSettings, ctx), - func(err error, duration time.Duration) { - log.Warnf("retrying Login to the Management service in %v due to error %v", duration, err) - }) -} - // GetConfigJSON returns the current config as a JSON string. // This can be used by the caller to persist the config via alternative storage // mechanisms (e.g., UserDefaults on tvOS where file writes are blocked). diff --git a/client/jobexec/executor.go b/client/jobexec/executor.go new file mode 100644 index 000000000..e29cc8840 --- /dev/null +++ b/client/jobexec/executor.go @@ -0,0 +1,76 @@ +package jobexec + +import ( + "context" + "errors" + "fmt" + "os" + "time" + + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/client/internal/debug" + "github.com/netbirdio/netbird/upload-server/types" +) + +const ( + MaxBundleWaitTime = 60 * time.Minute // maximum wait time for bundle generation (1 hour) +) + +var ( + ErrJobNotImplemented = errors.New("job not implemented") +) + +type Executor struct { +} + +func NewExecutor() *Executor { + return &Executor{} +} + +func (e *Executor) BundleJob(ctx context.Context, debugBundleDependencies debug.GeneratorDependencies, params debug.BundleConfig, waitForDuration time.Duration, mgmURL string) (string, error) { + if waitForDuration > MaxBundleWaitTime { + log.Warnf("bundle wait time %v exceeds maximum %v, capping to maximum", waitForDuration, MaxBundleWaitTime) + waitForDuration = MaxBundleWaitTime + } + + if waitForDuration > 0 { + if err := waitFor(ctx, waitForDuration); err != nil { + return "", err + } + } + + log.Infof("execute debug bundle generation") + + bundleGenerator := debug.NewBundleGenerator(debugBundleDependencies, params) + + path, err := bundleGenerator.Generate() + if err != nil { + return "", fmt.Errorf("generate debug bundle: %w", err) + } + defer func() { + if err := os.Remove(path); err != nil { + log.Errorf("failed to remove debug bundle file: %v", err) + } + }() + + key, err := debug.UploadDebugBundle(ctx, types.DefaultBundleURL, mgmURL, path) + if err != nil { + log.Errorf("failed to upload debug bundle: %v", err) + return "", fmt.Errorf("upload debug bundle: %w", err) + } + + log.Infof("debug bundle has been generated successfully") + return key, nil +} + +func waitFor(ctx context.Context, duration time.Duration) error { + log.Infof("wait for %v minutes before executing debug bundle", duration.Minutes()) + select { + case <-time.After(duration): + return nil + case <-ctx.Done(): + log.Infof("wait cancelled: %v", ctx.Err()) + return ctx.Err() + } +} diff --git a/client/proto/daemon.pb.go b/client/proto/daemon.pb.go index 5d56befc7..1d9d7233c 100644 --- a/client/proto/daemon.pb.go +++ b/client/proto/daemon.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.36.6 -// protoc v3.21.12 +// protoc v6.32.1 // source: daemon.proto package proto @@ -2757,7 +2757,6 @@ func (x *ForwardingRulesResponse) GetRules() []*ForwardingRule { type DebugBundleRequest struct { state protoimpl.MessageState `protogen:"open.v1"` Anonymize bool `protobuf:"varint,1,opt,name=anonymize,proto3" json:"anonymize,omitempty"` - Status string `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` SystemInfo bool `protobuf:"varint,3,opt,name=systemInfo,proto3" json:"systemInfo,omitempty"` UploadURL string `protobuf:"bytes,4,opt,name=uploadURL,proto3" json:"uploadURL,omitempty"` LogFileCount uint32 `protobuf:"varint,5,opt,name=logFileCount,proto3" json:"logFileCount,omitempty"` @@ -2802,13 +2801,6 @@ func (x *DebugBundleRequest) GetAnonymize() bool { return false } -func (x *DebugBundleRequest) GetStatus() string { - if x != nil { - return x.Status - } - return "" -} - func (x *DebugBundleRequest) GetSystemInfo() bool { if x != nil { return x.SystemInfo @@ -5372,6 +5364,154 @@ func (x *WaitJWTTokenResponse) GetExpiresIn() int64 { return 0 } +// StartCPUProfileRequest for starting CPU profiling +type StartCPUProfileRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StartCPUProfileRequest) Reset() { + *x = StartCPUProfileRequest{} + mi := &file_daemon_proto_msgTypes[79] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StartCPUProfileRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartCPUProfileRequest) ProtoMessage() {} + +func (x *StartCPUProfileRequest) ProtoReflect() protoreflect.Message { + mi := &file_daemon_proto_msgTypes[79] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartCPUProfileRequest.ProtoReflect.Descriptor instead. +func (*StartCPUProfileRequest) Descriptor() ([]byte, []int) { + return file_daemon_proto_rawDescGZIP(), []int{79} +} + +// StartCPUProfileResponse confirms CPU profiling has started +type StartCPUProfileResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StartCPUProfileResponse) Reset() { + *x = StartCPUProfileResponse{} + mi := &file_daemon_proto_msgTypes[80] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StartCPUProfileResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartCPUProfileResponse) ProtoMessage() {} + +func (x *StartCPUProfileResponse) ProtoReflect() protoreflect.Message { + mi := &file_daemon_proto_msgTypes[80] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartCPUProfileResponse.ProtoReflect.Descriptor instead. +func (*StartCPUProfileResponse) Descriptor() ([]byte, []int) { + return file_daemon_proto_rawDescGZIP(), []int{80} +} + +// StopCPUProfileRequest for stopping CPU profiling +type StopCPUProfileRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StopCPUProfileRequest) Reset() { + *x = StopCPUProfileRequest{} + mi := &file_daemon_proto_msgTypes[81] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StopCPUProfileRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StopCPUProfileRequest) ProtoMessage() {} + +func (x *StopCPUProfileRequest) ProtoReflect() protoreflect.Message { + mi := &file_daemon_proto_msgTypes[81] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StopCPUProfileRequest.ProtoReflect.Descriptor instead. +func (*StopCPUProfileRequest) Descriptor() ([]byte, []int) { + return file_daemon_proto_rawDescGZIP(), []int{81} +} + +// StopCPUProfileResponse confirms CPU profiling has stopped +type StopCPUProfileResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StopCPUProfileResponse) Reset() { + *x = StopCPUProfileResponse{} + mi := &file_daemon_proto_msgTypes[82] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StopCPUProfileResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StopCPUProfileResponse) ProtoMessage() {} + +func (x *StopCPUProfileResponse) ProtoReflect() protoreflect.Message { + mi := &file_daemon_proto_msgTypes[82] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StopCPUProfileResponse.ProtoReflect.Descriptor instead. +func (*StopCPUProfileResponse) Descriptor() ([]byte, []int) { + return file_daemon_proto_rawDescGZIP(), []int{82} +} + type InstallerResultRequest struct { state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields @@ -5380,7 +5520,7 @@ type InstallerResultRequest struct { func (x *InstallerResultRequest) Reset() { *x = InstallerResultRequest{} - mi := &file_daemon_proto_msgTypes[79] + mi := &file_daemon_proto_msgTypes[83] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5392,7 +5532,7 @@ func (x *InstallerResultRequest) String() string { func (*InstallerResultRequest) ProtoMessage() {} func (x *InstallerResultRequest) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[79] + mi := &file_daemon_proto_msgTypes[83] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5405,7 +5545,7 @@ func (x *InstallerResultRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use InstallerResultRequest.ProtoReflect.Descriptor instead. func (*InstallerResultRequest) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{79} + return file_daemon_proto_rawDescGZIP(), []int{83} } type InstallerResultResponse struct { @@ -5418,7 +5558,7 @@ type InstallerResultResponse struct { func (x *InstallerResultResponse) Reset() { *x = InstallerResultResponse{} - mi := &file_daemon_proto_msgTypes[80] + mi := &file_daemon_proto_msgTypes[84] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5430,7 +5570,7 @@ func (x *InstallerResultResponse) String() string { func (*InstallerResultResponse) ProtoMessage() {} func (x *InstallerResultResponse) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[80] + mi := &file_daemon_proto_msgTypes[84] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5443,7 +5583,7 @@ func (x *InstallerResultResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use InstallerResultResponse.ProtoReflect.Descriptor instead. func (*InstallerResultResponse) Descriptor() ([]byte, []int) { - return file_daemon_proto_rawDescGZIP(), []int{80} + return file_daemon_proto_rawDescGZIP(), []int{84} } func (x *InstallerResultResponse) GetSuccess() bool { @@ -5470,7 +5610,7 @@ type PortInfo_Range struct { func (x *PortInfo_Range) Reset() { *x = PortInfo_Range{} - mi := &file_daemon_proto_msgTypes[82] + mi := &file_daemon_proto_msgTypes[86] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5482,7 +5622,7 @@ func (x *PortInfo_Range) String() string { func (*PortInfo_Range) ProtoMessage() {} func (x *PortInfo_Range) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[82] + mi := &file_daemon_proto_msgTypes[86] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5773,10 +5913,9 @@ const file_daemon_proto_rawDesc = "" + "\x12translatedHostname\x18\x04 \x01(\tR\x12translatedHostname\x128\n" + "\x0etranslatedPort\x18\x05 \x01(\v2\x10.daemon.PortInfoR\x0etranslatedPort\"G\n" + "\x17ForwardingRulesResponse\x12,\n" + - "\x05rules\x18\x01 \x03(\v2\x16.daemon.ForwardingRuleR\x05rules\"\xac\x01\n" + + "\x05rules\x18\x01 \x03(\v2\x16.daemon.ForwardingRuleR\x05rules\"\x94\x01\n" + "\x12DebugBundleRequest\x12\x1c\n" + - "\tanonymize\x18\x01 \x01(\bR\tanonymize\x12\x16\n" + - "\x06status\x18\x02 \x01(\tR\x06status\x12\x1e\n" + + "\tanonymize\x18\x01 \x01(\bR\tanonymize\x12\x1e\n" + "\n" + "systemInfo\x18\x03 \x01(\bR\n" + "systemInfo\x12\x1c\n" + @@ -6003,6 +6142,10 @@ const file_daemon_proto_rawDesc = "" + "\x05token\x18\x01 \x01(\tR\x05token\x12\x1c\n" + "\ttokenType\x18\x02 \x01(\tR\ttokenType\x12\x1c\n" + "\texpiresIn\x18\x03 \x01(\x03R\texpiresIn\"\x18\n" + + "\x16StartCPUProfileRequest\"\x19\n" + + "\x17StartCPUProfileResponse\"\x17\n" + + "\x15StopCPUProfileRequest\"\x18\n" + + "\x16StopCPUProfileResponse\"\x18\n" + "\x16InstallerResultRequest\"O\n" + "\x17InstallerResultResponse\x12\x18\n" + "\asuccess\x18\x01 \x01(\bR\asuccess\x12\x1a\n" + @@ -6015,7 +6158,7 @@ const file_daemon_proto_rawDesc = "" + "\x04WARN\x10\x04\x12\b\n" + "\x04INFO\x10\x05\x12\t\n" + "\x05DEBUG\x10\x06\x12\t\n" + - "\x05TRACE\x10\a2\xb4\x13\n" + + "\x05TRACE\x10\a2\xdd\x14\n" + "\rDaemonService\x126\n" + "\x05Login\x12\x14.daemon.LoginRequest\x1a\x15.daemon.LoginResponse\"\x00\x12K\n" + "\fWaitSSOLogin\x12\x1b.daemon.WaitSSOLoginRequest\x1a\x1c.daemon.WaitSSOLoginResponse\"\x00\x12-\n" + @@ -6050,7 +6193,9 @@ const file_daemon_proto_rawDesc = "" + "\vGetFeatures\x12\x1a.daemon.GetFeaturesRequest\x1a\x1b.daemon.GetFeaturesResponse\"\x00\x12Z\n" + "\x11GetPeerSSHHostKey\x12 .daemon.GetPeerSSHHostKeyRequest\x1a!.daemon.GetPeerSSHHostKeyResponse\"\x00\x12Q\n" + "\x0eRequestJWTAuth\x12\x1d.daemon.RequestJWTAuthRequest\x1a\x1e.daemon.RequestJWTAuthResponse\"\x00\x12K\n" + - "\fWaitJWTToken\x12\x1b.daemon.WaitJWTTokenRequest\x1a\x1c.daemon.WaitJWTTokenResponse\"\x00\x12N\n" + + "\fWaitJWTToken\x12\x1b.daemon.WaitJWTTokenRequest\x1a\x1c.daemon.WaitJWTTokenResponse\"\x00\x12T\n" + + "\x0fStartCPUProfile\x12\x1e.daemon.StartCPUProfileRequest\x1a\x1f.daemon.StartCPUProfileResponse\"\x00\x12Q\n" + + "\x0eStopCPUProfile\x12\x1d.daemon.StopCPUProfileRequest\x1a\x1e.daemon.StopCPUProfileResponse\"\x00\x12N\n" + "\x11NotifyOSLifecycle\x12\x1a.daemon.OSLifecycleRequest\x1a\x1b.daemon.OSLifecycleResponse\"\x00\x12W\n" + "\x12GetInstallerResult\x12\x1e.daemon.InstallerResultRequest\x1a\x1f.daemon.InstallerResultResponse\"\x00B\bZ\x06/protob\x06proto3" @@ -6067,7 +6212,7 @@ func file_daemon_proto_rawDescGZIP() []byte { } var file_daemon_proto_enumTypes = make([]protoimpl.EnumInfo, 4) -var file_daemon_proto_msgTypes = make([]protoimpl.MessageInfo, 84) +var file_daemon_proto_msgTypes = make([]protoimpl.MessageInfo, 88) var file_daemon_proto_goTypes = []any{ (LogLevel)(0), // 0: daemon.LogLevel (OSLifecycleRequest_CycleType)(0), // 1: daemon.OSLifecycleRequest.CycleType @@ -6152,21 +6297,25 @@ var file_daemon_proto_goTypes = []any{ (*RequestJWTAuthResponse)(nil), // 80: daemon.RequestJWTAuthResponse (*WaitJWTTokenRequest)(nil), // 81: daemon.WaitJWTTokenRequest (*WaitJWTTokenResponse)(nil), // 82: daemon.WaitJWTTokenResponse - (*InstallerResultRequest)(nil), // 83: daemon.InstallerResultRequest - (*InstallerResultResponse)(nil), // 84: daemon.InstallerResultResponse - nil, // 85: daemon.Network.ResolvedIPsEntry - (*PortInfo_Range)(nil), // 86: daemon.PortInfo.Range - nil, // 87: daemon.SystemEvent.MetadataEntry - (*durationpb.Duration)(nil), // 88: google.protobuf.Duration - (*timestamppb.Timestamp)(nil), // 89: google.protobuf.Timestamp + (*StartCPUProfileRequest)(nil), // 83: daemon.StartCPUProfileRequest + (*StartCPUProfileResponse)(nil), // 84: daemon.StartCPUProfileResponse + (*StopCPUProfileRequest)(nil), // 85: daemon.StopCPUProfileRequest + (*StopCPUProfileResponse)(nil), // 86: daemon.StopCPUProfileResponse + (*InstallerResultRequest)(nil), // 87: daemon.InstallerResultRequest + (*InstallerResultResponse)(nil), // 88: daemon.InstallerResultResponse + nil, // 89: daemon.Network.ResolvedIPsEntry + (*PortInfo_Range)(nil), // 90: daemon.PortInfo.Range + nil, // 91: daemon.SystemEvent.MetadataEntry + (*durationpb.Duration)(nil), // 92: google.protobuf.Duration + (*timestamppb.Timestamp)(nil), // 93: google.protobuf.Timestamp } var file_daemon_proto_depIdxs = []int32{ 1, // 0: daemon.OSLifecycleRequest.type:type_name -> daemon.OSLifecycleRequest.CycleType - 88, // 1: daemon.LoginRequest.dnsRouteInterval:type_name -> google.protobuf.Duration + 92, // 1: daemon.LoginRequest.dnsRouteInterval:type_name -> google.protobuf.Duration 27, // 2: daemon.StatusResponse.fullStatus:type_name -> daemon.FullStatus - 89, // 3: daemon.PeerState.connStatusUpdate:type_name -> google.protobuf.Timestamp - 89, // 4: daemon.PeerState.lastWireguardHandshake:type_name -> google.protobuf.Timestamp - 88, // 5: daemon.PeerState.latency:type_name -> google.protobuf.Duration + 93, // 3: daemon.PeerState.connStatusUpdate:type_name -> google.protobuf.Timestamp + 93, // 4: daemon.PeerState.lastWireguardHandshake:type_name -> google.protobuf.Timestamp + 92, // 5: daemon.PeerState.latency:type_name -> google.protobuf.Duration 25, // 6: daemon.SSHServerState.sessions:type_name -> daemon.SSHSessionInfo 22, // 7: daemon.FullStatus.managementState:type_name -> daemon.ManagementState 21, // 8: daemon.FullStatus.signalState:type_name -> daemon.SignalState @@ -6177,8 +6326,8 @@ var file_daemon_proto_depIdxs = []int32{ 57, // 13: daemon.FullStatus.events:type_name -> daemon.SystemEvent 26, // 14: daemon.FullStatus.sshServerState:type_name -> daemon.SSHServerState 33, // 15: daemon.ListNetworksResponse.routes:type_name -> daemon.Network - 85, // 16: daemon.Network.resolvedIPs:type_name -> daemon.Network.ResolvedIPsEntry - 86, // 17: daemon.PortInfo.range:type_name -> daemon.PortInfo.Range + 89, // 16: daemon.Network.resolvedIPs:type_name -> daemon.Network.ResolvedIPsEntry + 90, // 17: daemon.PortInfo.range:type_name -> daemon.PortInfo.Range 34, // 18: daemon.ForwardingRule.destinationPort:type_name -> daemon.PortInfo 34, // 19: daemon.ForwardingRule.translatedPort:type_name -> daemon.PortInfo 35, // 20: daemon.ForwardingRulesResponse.rules:type_name -> daemon.ForwardingRule @@ -6189,10 +6338,10 @@ var file_daemon_proto_depIdxs = []int32{ 54, // 25: daemon.TracePacketResponse.stages:type_name -> daemon.TraceStage 2, // 26: daemon.SystemEvent.severity:type_name -> daemon.SystemEvent.Severity 3, // 27: daemon.SystemEvent.category:type_name -> daemon.SystemEvent.Category - 89, // 28: daemon.SystemEvent.timestamp:type_name -> google.protobuf.Timestamp - 87, // 29: daemon.SystemEvent.metadata:type_name -> daemon.SystemEvent.MetadataEntry + 93, // 28: daemon.SystemEvent.timestamp:type_name -> google.protobuf.Timestamp + 91, // 29: daemon.SystemEvent.metadata:type_name -> daemon.SystemEvent.MetadataEntry 57, // 30: daemon.GetEventsResponse.events:type_name -> daemon.SystemEvent - 88, // 31: daemon.SetConfigRequest.dnsRouteInterval:type_name -> google.protobuf.Duration + 92, // 31: daemon.SetConfigRequest.dnsRouteInterval:type_name -> google.protobuf.Duration 70, // 32: daemon.ListProfilesResponse.profiles:type_name -> daemon.Profile 32, // 33: daemon.Network.ResolvedIPsEntry.value:type_name -> daemon.IPList 7, // 34: daemon.DaemonService.Login:input_type -> daemon.LoginRequest @@ -6226,43 +6375,47 @@ var file_daemon_proto_depIdxs = []int32{ 77, // 62: daemon.DaemonService.GetPeerSSHHostKey:input_type -> daemon.GetPeerSSHHostKeyRequest 79, // 63: daemon.DaemonService.RequestJWTAuth:input_type -> daemon.RequestJWTAuthRequest 81, // 64: daemon.DaemonService.WaitJWTToken:input_type -> daemon.WaitJWTTokenRequest - 5, // 65: daemon.DaemonService.NotifyOSLifecycle:input_type -> daemon.OSLifecycleRequest - 83, // 66: daemon.DaemonService.GetInstallerResult:input_type -> daemon.InstallerResultRequest - 8, // 67: daemon.DaemonService.Login:output_type -> daemon.LoginResponse - 10, // 68: daemon.DaemonService.WaitSSOLogin:output_type -> daemon.WaitSSOLoginResponse - 12, // 69: daemon.DaemonService.Up:output_type -> daemon.UpResponse - 14, // 70: daemon.DaemonService.Status:output_type -> daemon.StatusResponse - 16, // 71: daemon.DaemonService.Down:output_type -> daemon.DownResponse - 18, // 72: daemon.DaemonService.GetConfig:output_type -> daemon.GetConfigResponse - 29, // 73: daemon.DaemonService.ListNetworks:output_type -> daemon.ListNetworksResponse - 31, // 74: daemon.DaemonService.SelectNetworks:output_type -> daemon.SelectNetworksResponse - 31, // 75: daemon.DaemonService.DeselectNetworks:output_type -> daemon.SelectNetworksResponse - 36, // 76: daemon.DaemonService.ForwardingRules:output_type -> daemon.ForwardingRulesResponse - 38, // 77: daemon.DaemonService.DebugBundle:output_type -> daemon.DebugBundleResponse - 40, // 78: daemon.DaemonService.GetLogLevel:output_type -> daemon.GetLogLevelResponse - 42, // 79: daemon.DaemonService.SetLogLevel:output_type -> daemon.SetLogLevelResponse - 45, // 80: daemon.DaemonService.ListStates:output_type -> daemon.ListStatesResponse - 47, // 81: daemon.DaemonService.CleanState:output_type -> daemon.CleanStateResponse - 49, // 82: daemon.DaemonService.DeleteState:output_type -> daemon.DeleteStateResponse - 51, // 83: daemon.DaemonService.SetSyncResponsePersistence:output_type -> daemon.SetSyncResponsePersistenceResponse - 55, // 84: daemon.DaemonService.TracePacket:output_type -> daemon.TracePacketResponse - 57, // 85: daemon.DaemonService.SubscribeEvents:output_type -> daemon.SystemEvent - 59, // 86: daemon.DaemonService.GetEvents:output_type -> daemon.GetEventsResponse - 61, // 87: daemon.DaemonService.SwitchProfile:output_type -> daemon.SwitchProfileResponse - 63, // 88: daemon.DaemonService.SetConfig:output_type -> daemon.SetConfigResponse - 65, // 89: daemon.DaemonService.AddProfile:output_type -> daemon.AddProfileResponse - 67, // 90: daemon.DaemonService.RemoveProfile:output_type -> daemon.RemoveProfileResponse - 69, // 91: daemon.DaemonService.ListProfiles:output_type -> daemon.ListProfilesResponse - 72, // 92: daemon.DaemonService.GetActiveProfile:output_type -> daemon.GetActiveProfileResponse - 74, // 93: daemon.DaemonService.Logout:output_type -> daemon.LogoutResponse - 76, // 94: daemon.DaemonService.GetFeatures:output_type -> daemon.GetFeaturesResponse - 78, // 95: daemon.DaemonService.GetPeerSSHHostKey:output_type -> daemon.GetPeerSSHHostKeyResponse - 80, // 96: daemon.DaemonService.RequestJWTAuth:output_type -> daemon.RequestJWTAuthResponse - 82, // 97: daemon.DaemonService.WaitJWTToken:output_type -> daemon.WaitJWTTokenResponse - 6, // 98: daemon.DaemonService.NotifyOSLifecycle:output_type -> daemon.OSLifecycleResponse - 84, // 99: daemon.DaemonService.GetInstallerResult:output_type -> daemon.InstallerResultResponse - 67, // [67:100] is the sub-list for method output_type - 34, // [34:67] is the sub-list for method input_type + 83, // 65: daemon.DaemonService.StartCPUProfile:input_type -> daemon.StartCPUProfileRequest + 85, // 66: daemon.DaemonService.StopCPUProfile:input_type -> daemon.StopCPUProfileRequest + 5, // 67: daemon.DaemonService.NotifyOSLifecycle:input_type -> daemon.OSLifecycleRequest + 87, // 68: daemon.DaemonService.GetInstallerResult:input_type -> daemon.InstallerResultRequest + 8, // 69: daemon.DaemonService.Login:output_type -> daemon.LoginResponse + 10, // 70: daemon.DaemonService.WaitSSOLogin:output_type -> daemon.WaitSSOLoginResponse + 12, // 71: daemon.DaemonService.Up:output_type -> daemon.UpResponse + 14, // 72: daemon.DaemonService.Status:output_type -> daemon.StatusResponse + 16, // 73: daemon.DaemonService.Down:output_type -> daemon.DownResponse + 18, // 74: daemon.DaemonService.GetConfig:output_type -> daemon.GetConfigResponse + 29, // 75: daemon.DaemonService.ListNetworks:output_type -> daemon.ListNetworksResponse + 31, // 76: daemon.DaemonService.SelectNetworks:output_type -> daemon.SelectNetworksResponse + 31, // 77: daemon.DaemonService.DeselectNetworks:output_type -> daemon.SelectNetworksResponse + 36, // 78: daemon.DaemonService.ForwardingRules:output_type -> daemon.ForwardingRulesResponse + 38, // 79: daemon.DaemonService.DebugBundle:output_type -> daemon.DebugBundleResponse + 40, // 80: daemon.DaemonService.GetLogLevel:output_type -> daemon.GetLogLevelResponse + 42, // 81: daemon.DaemonService.SetLogLevel:output_type -> daemon.SetLogLevelResponse + 45, // 82: daemon.DaemonService.ListStates:output_type -> daemon.ListStatesResponse + 47, // 83: daemon.DaemonService.CleanState:output_type -> daemon.CleanStateResponse + 49, // 84: daemon.DaemonService.DeleteState:output_type -> daemon.DeleteStateResponse + 51, // 85: daemon.DaemonService.SetSyncResponsePersistence:output_type -> daemon.SetSyncResponsePersistenceResponse + 55, // 86: daemon.DaemonService.TracePacket:output_type -> daemon.TracePacketResponse + 57, // 87: daemon.DaemonService.SubscribeEvents:output_type -> daemon.SystemEvent + 59, // 88: daemon.DaemonService.GetEvents:output_type -> daemon.GetEventsResponse + 61, // 89: daemon.DaemonService.SwitchProfile:output_type -> daemon.SwitchProfileResponse + 63, // 90: daemon.DaemonService.SetConfig:output_type -> daemon.SetConfigResponse + 65, // 91: daemon.DaemonService.AddProfile:output_type -> daemon.AddProfileResponse + 67, // 92: daemon.DaemonService.RemoveProfile:output_type -> daemon.RemoveProfileResponse + 69, // 93: daemon.DaemonService.ListProfiles:output_type -> daemon.ListProfilesResponse + 72, // 94: daemon.DaemonService.GetActiveProfile:output_type -> daemon.GetActiveProfileResponse + 74, // 95: daemon.DaemonService.Logout:output_type -> daemon.LogoutResponse + 76, // 96: daemon.DaemonService.GetFeatures:output_type -> daemon.GetFeaturesResponse + 78, // 97: daemon.DaemonService.GetPeerSSHHostKey:output_type -> daemon.GetPeerSSHHostKeyResponse + 80, // 98: daemon.DaemonService.RequestJWTAuth:output_type -> daemon.RequestJWTAuthResponse + 82, // 99: daemon.DaemonService.WaitJWTToken:output_type -> daemon.WaitJWTTokenResponse + 84, // 100: daemon.DaemonService.StartCPUProfile:output_type -> daemon.StartCPUProfileResponse + 86, // 101: daemon.DaemonService.StopCPUProfile:output_type -> daemon.StopCPUProfileResponse + 6, // 102: daemon.DaemonService.NotifyOSLifecycle:output_type -> daemon.OSLifecycleResponse + 88, // 103: daemon.DaemonService.GetInstallerResult:output_type -> daemon.InstallerResultResponse + 69, // [69:104] is the sub-list for method output_type + 34, // [34:69] is the sub-list for method input_type 34, // [34:34] is the sub-list for extension type_name 34, // [34:34] is the sub-list for extension extendee 0, // [0:34] is the sub-list for field type_name @@ -6292,7 +6445,7 @@ func file_daemon_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_daemon_proto_rawDesc), len(file_daemon_proto_rawDesc)), NumEnums: 4, - NumMessages: 84, + NumMessages: 88, NumExtensions: 0, NumServices: 1, }, diff --git a/client/proto/daemon.proto b/client/proto/daemon.proto index b75ca821a..68b9a9348 100644 --- a/client/proto/daemon.proto +++ b/client/proto/daemon.proto @@ -94,6 +94,12 @@ service DaemonService { // WaitJWTToken waits for JWT authentication completion rpc WaitJWTToken(WaitJWTTokenRequest) returns (WaitJWTTokenResponse) {} +// StartCPUProfile starts CPU profiling in the daemon + rpc StartCPUProfile(StartCPUProfileRequest) returns (StartCPUProfileResponse) {} + + // StopCPUProfile stops CPU profiling in the daemon + rpc StopCPUProfile(StopCPUProfileRequest) returns (StopCPUProfileResponse) {} + rpc NotifyOSLifecycle(OSLifecycleRequest) returns(OSLifecycleResponse) {} rpc GetInstallerResult(InstallerResultRequest) returns (InstallerResultResponse) {} @@ -455,7 +461,6 @@ message ForwardingRulesResponse { // DebugBundler message DebugBundleRequest { bool anonymize = 1; - string status = 2; bool systemInfo = 3; string uploadURL = 4; uint32 logFileCount = 5; @@ -777,6 +782,18 @@ message WaitJWTTokenResponse { int64 expiresIn = 3; } +// StartCPUProfileRequest for starting CPU profiling +message StartCPUProfileRequest {} + +// StartCPUProfileResponse confirms CPU profiling has started +message StartCPUProfileResponse {} + +// StopCPUProfileRequest for stopping CPU profiling +message StopCPUProfileRequest {} + +// StopCPUProfileResponse confirms CPU profiling has stopped +message StopCPUProfileResponse {} + message InstallerResultRequest { } diff --git a/client/proto/daemon_grpc.pb.go b/client/proto/daemon_grpc.pb.go index fdabb1879..ea9b4df05 100644 --- a/client/proto/daemon_grpc.pb.go +++ b/client/proto/daemon_grpc.pb.go @@ -70,6 +70,10 @@ type DaemonServiceClient interface { RequestJWTAuth(ctx context.Context, in *RequestJWTAuthRequest, opts ...grpc.CallOption) (*RequestJWTAuthResponse, error) // WaitJWTToken waits for JWT authentication completion WaitJWTToken(ctx context.Context, in *WaitJWTTokenRequest, opts ...grpc.CallOption) (*WaitJWTTokenResponse, error) + // StartCPUProfile starts CPU profiling in the daemon + StartCPUProfile(ctx context.Context, in *StartCPUProfileRequest, opts ...grpc.CallOption) (*StartCPUProfileResponse, error) + // StopCPUProfile stops CPU profiling in the daemon + StopCPUProfile(ctx context.Context, in *StopCPUProfileRequest, opts ...grpc.CallOption) (*StopCPUProfileResponse, error) NotifyOSLifecycle(ctx context.Context, in *OSLifecycleRequest, opts ...grpc.CallOption) (*OSLifecycleResponse, error) GetInstallerResult(ctx context.Context, in *InstallerResultRequest, opts ...grpc.CallOption) (*InstallerResultResponse, error) } @@ -384,6 +388,24 @@ func (c *daemonServiceClient) WaitJWTToken(ctx context.Context, in *WaitJWTToken return out, nil } +func (c *daemonServiceClient) StartCPUProfile(ctx context.Context, in *StartCPUProfileRequest, opts ...grpc.CallOption) (*StartCPUProfileResponse, error) { + out := new(StartCPUProfileResponse) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/StartCPUProfile", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *daemonServiceClient) StopCPUProfile(ctx context.Context, in *StopCPUProfileRequest, opts ...grpc.CallOption) (*StopCPUProfileResponse, error) { + out := new(StopCPUProfileResponse) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/StopCPUProfile", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *daemonServiceClient) NotifyOSLifecycle(ctx context.Context, in *OSLifecycleRequest, opts ...grpc.CallOption) (*OSLifecycleResponse, error) { out := new(OSLifecycleResponse) err := c.cc.Invoke(ctx, "/daemon.DaemonService/NotifyOSLifecycle", in, out, opts...) @@ -458,6 +480,10 @@ type DaemonServiceServer interface { RequestJWTAuth(context.Context, *RequestJWTAuthRequest) (*RequestJWTAuthResponse, error) // WaitJWTToken waits for JWT authentication completion WaitJWTToken(context.Context, *WaitJWTTokenRequest) (*WaitJWTTokenResponse, error) + // StartCPUProfile starts CPU profiling in the daemon + StartCPUProfile(context.Context, *StartCPUProfileRequest) (*StartCPUProfileResponse, error) + // StopCPUProfile stops CPU profiling in the daemon + StopCPUProfile(context.Context, *StopCPUProfileRequest) (*StopCPUProfileResponse, error) NotifyOSLifecycle(context.Context, *OSLifecycleRequest) (*OSLifecycleResponse, error) GetInstallerResult(context.Context, *InstallerResultRequest) (*InstallerResultResponse, error) mustEmbedUnimplementedDaemonServiceServer() @@ -560,6 +586,12 @@ func (UnimplementedDaemonServiceServer) RequestJWTAuth(context.Context, *Request func (UnimplementedDaemonServiceServer) WaitJWTToken(context.Context, *WaitJWTTokenRequest) (*WaitJWTTokenResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method WaitJWTToken not implemented") } +func (UnimplementedDaemonServiceServer) StartCPUProfile(context.Context, *StartCPUProfileRequest) (*StartCPUProfileResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StartCPUProfile not implemented") +} +func (UnimplementedDaemonServiceServer) StopCPUProfile(context.Context, *StopCPUProfileRequest) (*StopCPUProfileResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StopCPUProfile not implemented") +} func (UnimplementedDaemonServiceServer) NotifyOSLifecycle(context.Context, *OSLifecycleRequest) (*OSLifecycleResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method NotifyOSLifecycle not implemented") } @@ -1140,6 +1172,42 @@ func _DaemonService_WaitJWTToken_Handler(srv interface{}, ctx context.Context, d return interceptor(ctx, in, info, handler) } +func _DaemonService_StartCPUProfile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StartCPUProfileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DaemonServiceServer).StartCPUProfile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/daemon.DaemonService/StartCPUProfile", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DaemonServiceServer).StartCPUProfile(ctx, req.(*StartCPUProfileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DaemonService_StopCPUProfile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StopCPUProfileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DaemonServiceServer).StopCPUProfile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/daemon.DaemonService/StopCPUProfile", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DaemonServiceServer).StopCPUProfile(ctx, req.(*StopCPUProfileRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _DaemonService_NotifyOSLifecycle_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(OSLifecycleRequest) if err := dec(in); err != nil { @@ -1303,6 +1371,14 @@ var DaemonService_ServiceDesc = grpc.ServiceDesc{ MethodName: "WaitJWTToken", Handler: _DaemonService_WaitJWTToken_Handler, }, + { + MethodName: "StartCPUProfile", + Handler: _DaemonService_StartCPUProfile_Handler, + }, + { + MethodName: "StopCPUProfile", + Handler: _DaemonService_StopCPUProfile_Handler, + }, { MethodName: "NotifyOSLifecycle", Handler: _DaemonService_NotifyOSLifecycle_Handler, diff --git a/client/server/debug.go b/client/server/debug.go index dfad41604..4c531efba 100644 --- a/client/server/debug.go +++ b/client/server/debug.go @@ -3,25 +3,19 @@ package server import ( + "bytes" "context" - "crypto/sha256" - "encoding/json" "errors" "fmt" - "io" - "net/http" - "os" + "runtime/pprof" log "github.com/sirupsen/logrus" "github.com/netbirdio/netbird/client/internal/debug" "github.com/netbirdio/netbird/client/proto" mgmProto "github.com/netbirdio/netbird/shared/management/proto" - "github.com/netbirdio/netbird/upload-server/types" ) -const maxBundleUploadSize = 50 * 1024 * 1024 - // DebugBundle creates a debug bundle and returns the location. func (s *Server) DebugBundle(_ context.Context, req *proto.DebugBundleRequest) (resp *proto.DebugBundleResponse, err error) { s.mutex.Lock() @@ -32,16 +26,37 @@ func (s *Server) DebugBundle(_ context.Context, req *proto.DebugBundleRequest) ( log.Warnf("failed to get latest sync response: %v", err) } + var cpuProfileData []byte + if s.cpuProfileBuf != nil && !s.cpuProfiling { + cpuProfileData = s.cpuProfileBuf.Bytes() + defer func() { + s.cpuProfileBuf = nil + }() + } + + // Prepare refresh callback for health probes + var refreshStatus func() + if s.connectClient != nil { + engine := s.connectClient.Engine() + if engine != nil { + refreshStatus = func() { + log.Debug("refreshing system health status for debug bundle") + engine.RunHealthProbes(true) + } + } + } + bundleGenerator := debug.NewBundleGenerator( debug.GeneratorDependencies{ InternalConfig: s.config, StatusRecorder: s.statusRecorder, SyncResponse: syncResponse, - LogFile: s.logFile, + LogPath: s.logFile, + CPUProfile: cpuProfileData, + RefreshStatus: refreshStatus, }, debug.BundleConfig{ Anonymize: req.GetAnonymize(), - ClientStatus: req.GetStatus(), IncludeSystemInfo: req.GetSystemInfo(), LogFileCount: req.GetLogFileCount(), }, @@ -55,7 +70,7 @@ func (s *Server) DebugBundle(_ context.Context, req *proto.DebugBundleRequest) ( if req.GetUploadURL() == "" { return &proto.DebugBundleResponse{Path: path}, nil } - key, err := uploadDebugBundle(context.Background(), req.GetUploadURL(), s.config.ManagementURL.String(), path) + key, err := debug.UploadDebugBundle(context.Background(), req.GetUploadURL(), s.config.ManagementURL.String(), path) if err != nil { log.Errorf("failed to upload debug bundle to %s: %v", req.GetUploadURL(), err) return &proto.DebugBundleResponse{Path: path, UploadFailureReason: err.Error()}, nil @@ -66,92 +81,6 @@ func (s *Server) DebugBundle(_ context.Context, req *proto.DebugBundleRequest) ( return &proto.DebugBundleResponse{Path: path, UploadedKey: key}, nil } -func uploadDebugBundle(ctx context.Context, url, managementURL, filePath string) (key string, err error) { - response, err := getUploadURL(ctx, url, managementURL) - if err != nil { - return "", err - } - - err = upload(ctx, filePath, response) - if err != nil { - return "", err - } - return response.Key, nil -} - -func upload(ctx context.Context, filePath string, response *types.GetURLResponse) error { - fileData, err := os.Open(filePath) - if err != nil { - return fmt.Errorf("open file: %w", err) - } - - defer fileData.Close() - - stat, err := fileData.Stat() - if err != nil { - return fmt.Errorf("stat file: %w", err) - } - - if stat.Size() > maxBundleUploadSize { - return fmt.Errorf("file size exceeds maximum limit of %d bytes", maxBundleUploadSize) - } - - req, err := http.NewRequestWithContext(ctx, "PUT", response.URL, fileData) - if err != nil { - return fmt.Errorf("create PUT request: %w", err) - } - - req.ContentLength = stat.Size() - req.Header.Set("Content-Type", "application/octet-stream") - - putResp, err := http.DefaultClient.Do(req) - if err != nil { - return fmt.Errorf("upload failed: %v", err) - } - defer putResp.Body.Close() - - if putResp.StatusCode != http.StatusOK { - body, _ := io.ReadAll(putResp.Body) - return fmt.Errorf("upload status %d: %s", putResp.StatusCode, string(body)) - } - return nil -} - -func getUploadURL(ctx context.Context, url string, managementURL string) (*types.GetURLResponse, error) { - id := getURLHash(managementURL) - getReq, err := http.NewRequestWithContext(ctx, "GET", url+"?id="+id, nil) - if err != nil { - return nil, fmt.Errorf("create GET request: %w", err) - } - - getReq.Header.Set(types.ClientHeader, types.ClientHeaderValue) - - resp, err := http.DefaultClient.Do(getReq) - if err != nil { - return nil, fmt.Errorf("get presigned URL: %w", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - body, _ := io.ReadAll(resp.Body) - return nil, fmt.Errorf("get presigned URL status %d: %s", resp.StatusCode, string(body)) - } - - urlBytes, err := io.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("read response body: %w", err) - } - var response types.GetURLResponse - if err := json.Unmarshal(urlBytes, &response); err != nil { - return nil, fmt.Errorf("unmarshal response: %w", err) - } - return &response, nil -} - -func getURLHash(url string) string { - return fmt.Sprintf("%x", sha256.Sum256([]byte(url))) -} - // GetLogLevel gets the current logging level for the server. func (s *Server) GetLogLevel(_ context.Context, _ *proto.GetLogLevelRequest) (*proto.GetLogLevelResponse, error) { s.mutex.Lock() @@ -204,3 +133,43 @@ func (s *Server) getLatestSyncResponse() (*mgmProto.SyncResponse, error) { return cClient.GetLatestSyncResponse() } + +// StartCPUProfile starts CPU profiling in the daemon. +func (s *Server) StartCPUProfile(_ context.Context, _ *proto.StartCPUProfileRequest) (*proto.StartCPUProfileResponse, error) { + s.mutex.Lock() + defer s.mutex.Unlock() + + if s.cpuProfiling { + return nil, fmt.Errorf("CPU profiling already in progress") + } + + s.cpuProfileBuf = &bytes.Buffer{} + s.cpuProfiling = true + if err := pprof.StartCPUProfile(s.cpuProfileBuf); err != nil { + s.cpuProfileBuf = nil + s.cpuProfiling = false + return nil, fmt.Errorf("start CPU profile: %w", err) + } + + log.Info("CPU profiling started") + return &proto.StartCPUProfileResponse{}, nil +} + +// StopCPUProfile stops CPU profiling in the daemon. +func (s *Server) StopCPUProfile(_ context.Context, _ *proto.StopCPUProfileRequest) (*proto.StopCPUProfileResponse, error) { + s.mutex.Lock() + defer s.mutex.Unlock() + + if !s.cpuProfiling { + return nil, fmt.Errorf("CPU profiling not in progress") + } + + pprof.StopCPUProfile() + s.cpuProfiling = false + + if s.cpuProfileBuf != nil { + log.Infof("CPU profiling stopped, captured %d bytes", s.cpuProfileBuf.Len()) + } + + return &proto.StopCPUProfileResponse{}, nil +} diff --git a/client/server/server.go b/client/server/server.go index d593b3f34..108eab9fe 100644 --- a/client/server/server.go +++ b/client/server/server.go @@ -1,6 +1,7 @@ package server import ( + "bytes" "context" "errors" "fmt" @@ -13,9 +14,8 @@ import ( "time" "github.com/cenkalti/backoff/v4" - "golang.zx2c4.com/wireguard/wgctrl/wgtypes" - log "github.com/sirupsen/logrus" + "golang.zx2c4.com/wireguard/wgctrl/wgtypes" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" gstatus "google.golang.org/grpc/status" @@ -67,7 +67,7 @@ type Server struct { proto.UnimplementedDaemonServiceServer clientRunning bool // protected by mutex clientRunningChan chan struct{} - clientGiveUpChan chan struct{} + clientGiveUpChan chan struct{} // closed when connectWithRetryRuns goroutine exits connectClient *internal.ConnectClient @@ -78,6 +78,9 @@ type Server struct { persistSyncResponse bool isSessionActive atomic.Bool + cpuProfileBuf *bytes.Buffer + cpuProfiling bool + profileManager *profilemanager.ServiceManager profilesDisabled bool updateSettingsDisabled bool @@ -250,10 +253,17 @@ func (s *Server) connectWithRetryRuns(ctx context.Context, profileConfig *profil // loginAttempt attempts to login using the provided information. it returns a status in case something fails func (s *Server) loginAttempt(ctx context.Context, setupKey, jwtToken string) (internal.StatusType, error) { - var status internal.StatusType - err := internal.Login(ctx, s.config, setupKey, jwtToken) + authClient, err := auth.NewAuth(ctx, s.config.PrivateKey, s.config.ManagementURL, s.config) if err != nil { - if s, ok := gstatus.FromError(err); ok && (s.Code() == codes.InvalidArgument || s.Code() == codes.PermissionDenied) { + log.Errorf("failed to create auth client: %v", err) + return internal.StatusLoginFailed, err + } + defer authClient.Close() + + var status internal.StatusType + err, isAuthError := authClient.Login(ctx, setupKey, jwtToken) + if err != nil { + if isAuthError { log.Warnf("failed login: %v", err) status = internal.StatusNeedsLogin } else { @@ -578,8 +588,7 @@ func (s *Server) WaitSSOLogin(callerCtx context.Context, msg *proto.WaitSSOLogin s.oauthAuthFlow.waitCancel() } - waitTimeout := time.Until(s.oauthAuthFlow.expiresAt) - waitCTX, cancel := context.WithTimeout(ctx, waitTimeout) + waitCTX, cancel := context.WithCancel(ctx) defer cancel() s.mutex.Lock() @@ -793,9 +802,11 @@ func (s *Server) SwitchProfile(callerCtx context.Context, msg *proto.SwitchProfi // Down engine work in the daemon. func (s *Server) Down(ctx context.Context, _ *proto.DownRequest) (*proto.DownResponse, error) { s.mutex.Lock() - defer s.mutex.Unlock() + + giveUpChan := s.clientGiveUpChan if err := s.cleanupConnection(); err != nil { + s.mutex.Unlock() // todo review to update the status in case any type of error log.Errorf("failed to shut down properly: %v", err) return nil, err @@ -804,6 +815,20 @@ func (s *Server) Down(ctx context.Context, _ *proto.DownRequest) (*proto.DownRes state := internal.CtxGetState(s.rootCtx) state.Set(internal.StatusIdle) + s.mutex.Unlock() + + // Wait for the connectWithRetryRuns goroutine to finish with a short timeout. + // This prevents the goroutine from setting ErrResetConnection after Down() returns. + // The giveUpChan is closed at the end of connectWithRetryRuns. + if giveUpChan != nil { + select { + case <-giveUpChan: + log.Debugf("client goroutine finished successfully") + case <-time.After(5 * time.Second): + log.Warnf("timeout waiting for client goroutine to finish, proceeding anyway") + } + } + return &proto.DownResponse{}, nil } @@ -1308,6 +1333,10 @@ func (s *Server) runProbes(waitForProbeResult bool) { if engine.RunHealthProbes(waitForProbeResult) { s.lastProbe = time.Now() } + } else { + if err := s.statusRecorder.RefreshWireGuardStats(); err != nil { + log.Debugf("failed to refresh WireGuard stats: %v", err) + } } } @@ -1521,7 +1550,7 @@ func (s *Server) connect(ctx context.Context, config *profilemanager.Config, sta log.Tracef("running client connection") s.connectClient = internal.NewConnectClient(ctx, config, statusRecorder, doInitialAutoUpdate) s.connectClient.SetSyncResponsePersistence(s.persistSyncResponse) - if err := s.connectClient.Run(runningChan); err != nil { + if err := s.connectClient.Run(runningChan, s.logFile); err != nil { return err } return nil diff --git a/client/server/server_test.go b/client/server/server_test.go index 1ed115769..82079c531 100644 --- a/client/server/server_test.go +++ b/client/server/server_test.go @@ -20,6 +20,7 @@ import ( "github.com/netbirdio/netbird/management/internals/modules/peers" "github.com/netbirdio/netbird/management/internals/modules/peers/ephemeral/manager" nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" + "github.com/netbirdio/netbird/management/server/job" "github.com/netbirdio/netbird/management/internals/server/config" "github.com/netbirdio/netbird/management/server/groups" @@ -306,6 +307,8 @@ func startManagement(t *testing.T, signalAddr string, counter *int) (*grpc.Serve peersManager := peers.NewManager(store, permissionsManagerMock) settingsManagerMock := settings.NewMockManager(ctrl) + jobManager := job.NewJobManager(nil, store, peersManager) + ia, _ := integrations.NewIntegratedValidator(context.Background(), peersManager, settingsManagerMock, eventStore) metrics, err := telemetry.NewDefaultAppMetrics(context.Background()) @@ -317,7 +320,7 @@ func startManagement(t *testing.T, signalAddr string, counter *int) (*grpc.Serve requestBuffer := server.NewAccountRequestBuffer(context.Background(), store) peersUpdateManager := update_channel.NewPeersUpdateManager(metrics) networkMapController := controller.NewController(context.Background(), store, metrics, peersUpdateManager, requestBuffer, server.MockIntegratedValidator{}, settingsMockManager, "netbird.selfhosted", port_forwarding.NewControllerMock(), manager.NewEphemeralManager(store, peersManager), config) - accountManager, err := server.BuildManager(context.Background(), config, store, networkMapController, nil, "", eventStore, nil, false, ia, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManagerMock, false) + accountManager, err := server.BuildManager(context.Background(), config, store, networkMapController, jobManager, nil, "", eventStore, nil, false, ia, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManagerMock, false) if err != nil { return nil, "", err } @@ -326,7 +329,7 @@ func startManagement(t *testing.T, signalAddr string, counter *int) (*grpc.Serve if err != nil { return nil, "", err } - mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, secretsManager, nil, nil, &server.MockIntegratedValidator{}, networkMapController, nil) + mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, &server.MockIntegratedValidator{}, networkMapController, nil) if err != nil { return nil, "", err } diff --git a/client/ssh/proxy/proxy.go b/client/ssh/proxy/proxy.go index cb1c36e13..8897b9c7e 100644 --- a/client/ssh/proxy/proxy.go +++ b/client/ssh/proxy/proxy.go @@ -207,8 +207,6 @@ func (p *SSHProxy) handleProxyExitCode(session ssh.Session, err error) { } func (p *SSHProxy) handleNonInteractiveSession(session ssh.Session, sshClient *cryptossh.Client) { - // Create a backend session to mirror the client's session request. - // This keeps the connection alive on the server side while port forwarding channels operate. serverSession, err := sshClient.NewSession() if err != nil { _, _ = fmt.Fprintf(p.stderr, "create server session: %v\n", err) @@ -216,10 +214,28 @@ func (p *SSHProxy) handleNonInteractiveSession(session ssh.Session, sshClient *c } defer func() { _ = serverSession.Close() }() - <-session.Context().Done() + serverSession.Stdin = session + serverSession.Stdout = session + serverSession.Stderr = session.Stderr() - if err := session.Exit(0); err != nil { - log.Debugf("session exit: %v", err) + if err := serverSession.Shell(); err != nil { + log.Debugf("start shell: %v", err) + return + } + + done := make(chan error, 1) + go func() { + done <- serverSession.Wait() + }() + + select { + case <-session.Context().Done(): + return + case err := <-done: + if err != nil { + log.Debugf("shell session: %v", err) + p.handleProxyExitCode(session, err) + } } } diff --git a/client/ssh/server/command_execution.go b/client/ssh/server/command_execution.go index 7a01ce4f6..b0a85fe4b 100644 --- a/client/ssh/server/command_execution.go +++ b/client/ssh/server/command_execution.go @@ -12,8 +12,8 @@ import ( log "github.com/sirupsen/logrus" ) -// handleCommand executes an SSH command with privilege validation -func (s *Server) handleCommand(logger *log.Entry, session ssh.Session, privilegeResult PrivilegeCheckResult, winCh <-chan ssh.Window) { +// handleExecution executes an SSH command or shell with privilege validation +func (s *Server) handleExecution(logger *log.Entry, session ssh.Session, privilegeResult PrivilegeCheckResult, ptyReq ssh.Pty, winCh <-chan ssh.Window) { hasPty := winCh != nil commandType := "command" @@ -23,7 +23,7 @@ func (s *Server) handleCommand(logger *log.Entry, session ssh.Session, privilege logger.Infof("executing %s: %s", commandType, safeLogCommand(session.Command())) - execCmd, cleanup, err := s.createCommand(privilegeResult, session, hasPty) + execCmd, cleanup, err := s.createCommand(logger, privilegeResult, session, hasPty) if err != nil { logger.Errorf("%s creation failed: %v", commandType, err) @@ -51,13 +51,12 @@ func (s *Server) handleCommand(logger *log.Entry, session ssh.Session, privilege defer cleanup() - ptyReq, _, _ := session.Pty() if s.executeCommandWithPty(logger, session, execCmd, privilegeResult, ptyReq, winCh) { logger.Debugf("%s execution completed", commandType) } } -func (s *Server) createCommand(privilegeResult PrivilegeCheckResult, session ssh.Session, hasPty bool) (*exec.Cmd, func(), error) { +func (s *Server) createCommand(logger *log.Entry, privilegeResult PrivilegeCheckResult, session ssh.Session, hasPty bool) (*exec.Cmd, func(), error) { localUser := privilegeResult.User if localUser == nil { return nil, nil, errors.New("no user in privilege result") @@ -66,28 +65,28 @@ func (s *Server) createCommand(privilegeResult PrivilegeCheckResult, session ssh // If PTY requested but su doesn't support --pty, skip su and use executor // This ensures PTY functionality is provided (executor runs within our allocated PTY) if hasPty && !s.suSupportsPty { - log.Debugf("PTY requested but su doesn't support --pty, using executor for PTY functionality") - cmd, cleanup, err := s.createExecutorCommand(session, localUser, hasPty) + logger.Debugf("PTY requested but su doesn't support --pty, using executor for PTY functionality") + cmd, cleanup, err := s.createExecutorCommand(logger, session, localUser, hasPty) if err != nil { return nil, nil, fmt.Errorf("create command with privileges: %w", err) } - cmd.Env = s.prepareCommandEnv(localUser, session) + cmd.Env = s.prepareCommandEnv(logger, localUser, session) return cmd, cleanup, nil } // Try su first for system integration (PAM/audit) when privileged - cmd, err := s.createSuCommand(session, localUser, hasPty) + cmd, err := s.createSuCommand(logger, session, localUser, hasPty) if err != nil || privilegeResult.UsedFallback { - log.Debugf("su command failed, falling back to executor: %v", err) - cmd, cleanup, err := s.createExecutorCommand(session, localUser, hasPty) + logger.Debugf("su command failed, falling back to executor: %v", err) + cmd, cleanup, err := s.createExecutorCommand(logger, session, localUser, hasPty) if err != nil { return nil, nil, fmt.Errorf("create command with privileges: %w", err) } - cmd.Env = s.prepareCommandEnv(localUser, session) + cmd.Env = s.prepareCommandEnv(logger, localUser, session) return cmd, cleanup, nil } - cmd.Env = s.prepareCommandEnv(localUser, session) + cmd.Env = s.prepareCommandEnv(logger, localUser, session) return cmd, func() {}, nil } diff --git a/client/ssh/server/command_execution_js.go b/client/ssh/server/command_execution_js.go index 01759a337..3aeaa135c 100644 --- a/client/ssh/server/command_execution_js.go +++ b/client/ssh/server/command_execution_js.go @@ -15,17 +15,17 @@ import ( var errNotSupported = errors.New("SSH server command execution not supported on WASM/JS platform") // createSuCommand is not supported on JS/WASM -func (s *Server) createSuCommand(_ ssh.Session, _ *user.User, _ bool) (*exec.Cmd, error) { +func (s *Server) createSuCommand(_ *log.Entry, _ ssh.Session, _ *user.User, _ bool) (*exec.Cmd, error) { return nil, errNotSupported } // createExecutorCommand is not supported on JS/WASM -func (s *Server) createExecutorCommand(_ ssh.Session, _ *user.User, _ bool) (*exec.Cmd, func(), error) { +func (s *Server) createExecutorCommand(_ *log.Entry, _ ssh.Session, _ *user.User, _ bool) (*exec.Cmd, func(), error) { return nil, nil, errNotSupported } // prepareCommandEnv is not supported on JS/WASM -func (s *Server) prepareCommandEnv(_ *user.User, _ ssh.Session) []string { +func (s *Server) prepareCommandEnv(_ *log.Entry, _ *user.User, _ ssh.Session) []string { return nil } diff --git a/client/ssh/server/command_execution_unix.go b/client/ssh/server/command_execution_unix.go index db1a9bcfe..279b89341 100644 --- a/client/ssh/server/command_execution_unix.go +++ b/client/ssh/server/command_execution_unix.go @@ -10,6 +10,7 @@ import ( "os" "os/exec" "os/user" + "path/filepath" "runtime" "strings" "sync" @@ -99,40 +100,52 @@ func (s *Server) detectUtilLinuxLogin(ctx context.Context) bool { return isUtilLinux } -// createSuCommand creates a command using su -l -c for privilege switching -func (s *Server) createSuCommand(session ssh.Session, localUser *user.User, hasPty bool) (*exec.Cmd, error) { +// createSuCommand creates a command using su - for privilege switching. +func (s *Server) createSuCommand(logger *log.Entry, session ssh.Session, localUser *user.User, hasPty bool) (*exec.Cmd, error) { + if err := validateUsername(localUser.Username); err != nil { + return nil, fmt.Errorf("invalid username %q: %w", localUser.Username, err) + } + suPath, err := exec.LookPath("su") if err != nil { return nil, fmt.Errorf("su command not available: %w", err) } - command := session.RawCommand() - if command == "" { - return nil, fmt.Errorf("no command specified for su execution") - } - - args := []string{"-l"} + args := []string{"-"} if hasPty && s.suSupportsPty { args = append(args, "--pty") } - args = append(args, localUser.Username, "-c", command) + args = append(args, localUser.Username) + command := session.RawCommand() + if command != "" { + args = append(args, "-c", command) + } + + logger.Debugf("creating su command: %s %v", suPath, args) cmd := exec.CommandContext(session.Context(), suPath, args...) cmd.Dir = localUser.HomeDir return cmd, nil } -// getShellCommandArgs returns the shell command and arguments for executing a command string +// getShellCommandArgs returns the shell command and arguments for executing a command string. func (s *Server) getShellCommandArgs(shell, cmdString string) []string { if cmdString == "" { - return []string{shell, "-l"} + return []string{shell} } - return []string{shell, "-l", "-c", cmdString} + return []string{shell, "-c", cmdString} +} + +// createShellCommand creates an exec.Cmd configured as a login shell by setting argv[0] to "-shellname". +func (s *Server) createShellCommand(ctx context.Context, shell string, args []string) *exec.Cmd { + cmd := exec.CommandContext(ctx, shell, args[1:]...) + cmd.Args[0] = "-" + filepath.Base(shell) + return cmd } // prepareCommandEnv prepares environment variables for command execution on Unix -func (s *Server) prepareCommandEnv(localUser *user.User, session ssh.Session) []string { +func (s *Server) prepareCommandEnv(_ *log.Entry, localUser *user.User, session ssh.Session) []string { env := prepareUserEnv(localUser, getUserShell(localUser.Uid)) env = append(env, prepareSSHEnv(session)...) for _, v := range session.Environ() { @@ -154,7 +167,7 @@ func (s *Server) executeCommandWithPty(logger *log.Entry, session ssh.Session, e return s.runPtyCommand(logger, session, execCmd, ptyReq, winCh) } -func (s *Server) handlePty(logger *log.Entry, session ssh.Session, privilegeResult PrivilegeCheckResult, ptyReq ssh.Pty, winCh <-chan ssh.Window) bool { +func (s *Server) handlePtyLogin(logger *log.Entry, session ssh.Session, privilegeResult PrivilegeCheckResult, ptyReq ssh.Pty, winCh <-chan ssh.Window) bool { execCmd, err := s.createPtyCommand(privilegeResult, ptyReq, session) if err != nil { logger.Errorf("Pty command creation failed: %v", err) @@ -244,11 +257,6 @@ func (s *Server) handlePtyIO(logger *log.Entry, session ssh.Session, ptyMgr *pty }() go func() { - defer func() { - if err := session.Close(); err != nil && !errors.Is(err, io.EOF) { - logger.Debugf("session close error: %v", err) - } - }() if _, err := io.Copy(session, ptmx); err != nil { if !errors.Is(err, io.EOF) && !errors.Is(err, syscall.EIO) { logger.Warnf("Pty output copy error: %v", err) @@ -268,7 +276,7 @@ func (s *Server) waitForPtyCompletion(logger *log.Entry, session ssh.Session, ex case <-ctx.Done(): s.handlePtySessionCancellation(logger, session, execCmd, ptyMgr, done) case err := <-done: - s.handlePtyCommandCompletion(logger, session, err) + s.handlePtyCommandCompletion(logger, session, ptyMgr, err) } } @@ -296,17 +304,20 @@ func (s *Server) handlePtySessionCancellation(logger *log.Entry, session ssh.Ses } } -func (s *Server) handlePtyCommandCompletion(logger *log.Entry, session ssh.Session, err error) { +func (s *Server) handlePtyCommandCompletion(logger *log.Entry, session ssh.Session, ptyMgr *ptyManager, err error) { if err != nil { logger.Debugf("Pty command execution failed: %v", err) s.handleSessionExit(session, err, logger) - return + } else { + logger.Debugf("Pty command completed successfully") + if err := session.Exit(0); err != nil { + logSessionExitError(logger, err) + } } - // Normal completion - logger.Debugf("Pty command completed successfully") - if err := session.Exit(0); err != nil { - logSessionExitError(logger, err) + // Close PTY to unblock io.Copy goroutines + if err := ptyMgr.Close(); err != nil { + logger.Debugf("Pty close after completion: %v", err) } } diff --git a/client/ssh/server/command_execution_windows.go b/client/ssh/server/command_execution_windows.go index 998796871..e1ba777f6 100644 --- a/client/ssh/server/command_execution_windows.go +++ b/client/ssh/server/command_execution_windows.go @@ -20,32 +20,32 @@ import ( // getUserEnvironment retrieves the Windows environment for the target user. // Follows OpenSSH's resilient approach with graceful degradation on failures. -func (s *Server) getUserEnvironment(username, domain string) ([]string, error) { - userToken, err := s.getUserToken(username, domain) +func (s *Server) getUserEnvironment(logger *log.Entry, username, domain string) ([]string, error) { + userToken, err := s.getUserToken(logger, username, domain) if err != nil { return nil, fmt.Errorf("get user token: %w", err) } defer func() { if err := windows.CloseHandle(userToken); err != nil { - log.Debugf("close user token: %v", err) + logger.Debugf("close user token: %v", err) } }() - return s.getUserEnvironmentWithToken(userToken, username, domain) + return s.getUserEnvironmentWithToken(logger, userToken, username, domain) } // getUserEnvironmentWithToken retrieves the Windows environment using an existing token. -func (s *Server) getUserEnvironmentWithToken(userToken windows.Handle, username, domain string) ([]string, error) { +func (s *Server) getUserEnvironmentWithToken(logger *log.Entry, userToken windows.Handle, username, domain string) ([]string, error) { userProfile, err := s.loadUserProfile(userToken, username, domain) if err != nil { - log.Debugf("failed to load user profile for %s\\%s: %v", domain, username, err) + logger.Debugf("failed to load user profile for %s\\%s: %v", domain, username, err) userProfile = fmt.Sprintf("C:\\Users\\%s", username) } envMap := make(map[string]string) if err := s.loadSystemEnvironment(envMap); err != nil { - log.Debugf("failed to load system environment from registry: %v", err) + logger.Debugf("failed to load system environment from registry: %v", err) } s.setUserEnvironmentVariables(envMap, userProfile, username, domain) @@ -59,8 +59,8 @@ func (s *Server) getUserEnvironmentWithToken(userToken windows.Handle, username, } // getUserToken creates a user token for the specified user. -func (s *Server) getUserToken(username, domain string) (windows.Handle, error) { - privilegeDropper := NewPrivilegeDropper() +func (s *Server) getUserToken(logger *log.Entry, username, domain string) (windows.Handle, error) { + privilegeDropper := NewPrivilegeDropper(WithLogger(logger)) token, err := privilegeDropper.createToken(username, domain) if err != nil { return 0, fmt.Errorf("generate S4U user token: %w", err) @@ -242,9 +242,9 @@ func (s *Server) setUserEnvironmentVariables(envMap map[string]string, userProfi } // prepareCommandEnv prepares environment variables for command execution on Windows -func (s *Server) prepareCommandEnv(localUser *user.User, session ssh.Session) []string { +func (s *Server) prepareCommandEnv(logger *log.Entry, localUser *user.User, session ssh.Session) []string { username, domain := s.parseUsername(localUser.Username) - userEnv, err := s.getUserEnvironment(username, domain) + userEnv, err := s.getUserEnvironment(logger, username, domain) if err != nil { log.Debugf("failed to get user environment for %s\\%s, using fallback: %v", domain, username, err) env := prepareUserEnv(localUser, getUserShell(localUser.Uid)) @@ -267,22 +267,16 @@ func (s *Server) prepareCommandEnv(localUser *user.User, session ssh.Session) [] return env } -func (s *Server) handlePty(logger *log.Entry, session ssh.Session, privilegeResult PrivilegeCheckResult, ptyReq ssh.Pty, winCh <-chan ssh.Window) bool { +func (s *Server) handlePtyLogin(logger *log.Entry, session ssh.Session, privilegeResult PrivilegeCheckResult, ptyReq ssh.Pty, _ <-chan ssh.Window) bool { if privilegeResult.User == nil { logger.Errorf("no user in privilege result") return false } - cmd := session.Command() shell := getUserShell(privilegeResult.User.Uid) + logger.Infof("starting interactive shell: %s", shell) - if len(cmd) == 0 { - logger.Infof("starting interactive shell: %s", shell) - } else { - logger.Infof("executing command: %s", safeLogCommand(cmd)) - } - - s.handlePtyWithUserSwitching(logger, session, privilegeResult, ptyReq, winCh, cmd) + s.executeCommandWithPty(logger, session, nil, privilegeResult, ptyReq, nil) return true } @@ -294,11 +288,6 @@ func (s *Server) getShellCommandArgs(shell, cmdString string) []string { return []string{shell, "-Command", cmdString} } -func (s *Server) handlePtyWithUserSwitching(logger *log.Entry, session ssh.Session, privilegeResult PrivilegeCheckResult, ptyReq ssh.Pty, _ <-chan ssh.Window, _ []string) { - logger.Info("starting interactive shell") - s.executeConPtyCommand(logger, session, privilegeResult, ptyReq, session.RawCommand()) -} - type PtyExecutionRequest struct { Shell string Command string @@ -308,25 +297,25 @@ type PtyExecutionRequest struct { Domain string } -func executePtyCommandWithUserToken(ctx context.Context, session ssh.Session, req PtyExecutionRequest) error { - log.Tracef("executing Windows ConPty command with user switching: shell=%s, command=%s, user=%s\\%s, size=%dx%d", +func executePtyCommandWithUserToken(logger *log.Entry, session ssh.Session, req PtyExecutionRequest) error { + logger.Tracef("executing Windows ConPty command with user switching: shell=%s, command=%s, user=%s\\%s, size=%dx%d", req.Shell, req.Command, req.Domain, req.Username, req.Width, req.Height) - privilegeDropper := NewPrivilegeDropper() + privilegeDropper := NewPrivilegeDropper(WithLogger(logger)) userToken, err := privilegeDropper.createToken(req.Username, req.Domain) if err != nil { return fmt.Errorf("create user token: %w", err) } defer func() { if err := windows.CloseHandle(userToken); err != nil { - log.Debugf("close user token: %v", err) + logger.Debugf("close user token: %v", err) } }() server := &Server{} - userEnv, err := server.getUserEnvironmentWithToken(userToken, req.Username, req.Domain) + userEnv, err := server.getUserEnvironmentWithToken(logger, userToken, req.Username, req.Domain) if err != nil { - log.Debugf("failed to get user environment for %s\\%s, using system environment: %v", req.Domain, req.Username, err) + logger.Debugf("failed to get user environment for %s\\%s, using system environment: %v", req.Domain, req.Username, err) userEnv = os.Environ() } @@ -348,8 +337,8 @@ func executePtyCommandWithUserToken(ctx context.Context, session ssh.Session, re Environment: userEnv, } - log.Debugf("executePtyCommandWithUserToken: calling winpty execution with working dir: %s", workingDir) - return winpty.ExecutePtyWithUserToken(ctx, session, ptyConfig, userConfig) + logger.Debugf("executePtyCommandWithUserToken: calling winpty execution with working dir: %s", workingDir) + return winpty.ExecutePtyWithUserToken(session, ptyConfig, userConfig) } func getUserHomeFromEnv(env []string) string { @@ -371,10 +360,8 @@ func (s *Server) killProcessGroup(cmd *exec.Cmd) { return } - logger := log.WithField("pid", cmd.Process.Pid) - if err := cmd.Process.Kill(); err != nil { - logger.Debugf("kill process failed: %v", err) + log.Debugf("kill process %d failed: %v", cmd.Process.Pid, err) } } @@ -389,21 +376,7 @@ func (s *Server) detectUtilLinuxLogin(context.Context) bool { } // executeCommandWithPty executes a command with PTY allocation on Windows using ConPty -func (s *Server) executeCommandWithPty(logger *log.Entry, session ssh.Session, execCmd *exec.Cmd, privilegeResult PrivilegeCheckResult, ptyReq ssh.Pty, winCh <-chan ssh.Window) bool { - command := session.RawCommand() - if command == "" { - logger.Error("no command specified for PTY execution") - if err := session.Exit(1); err != nil { - logSessionExitError(logger, err) - } - return false - } - - return s.executeConPtyCommand(logger, session, privilegeResult, ptyReq, command) -} - -// executeConPtyCommand executes a command using ConPty (common for interactive and command execution) -func (s *Server) executeConPtyCommand(logger *log.Entry, session ssh.Session, privilegeResult PrivilegeCheckResult, ptyReq ssh.Pty, command string) bool { +func (s *Server) executeCommandWithPty(logger *log.Entry, session ssh.Session, _ *exec.Cmd, privilegeResult PrivilegeCheckResult, ptyReq ssh.Pty, _ <-chan ssh.Window) bool { localUser := privilegeResult.User if localUser == nil { logger.Errorf("no user in privilege result") @@ -415,14 +388,14 @@ func (s *Server) executeConPtyCommand(logger *log.Entry, session ssh.Session, pr req := PtyExecutionRequest{ Shell: shell, - Command: command, + Command: session.RawCommand(), Width: ptyReq.Window.Width, Height: ptyReq.Window.Height, Username: username, Domain: domain, } - if err := executePtyCommandWithUserToken(session.Context(), session, req); err != nil { + if err := executePtyCommandWithUserToken(logger, session, req); err != nil { logger.Errorf("ConPty execution failed: %v", err) if err := session.Exit(1); err != nil { logSessionExitError(logger, err) diff --git a/client/ssh/server/compatibility_test.go b/client/ssh/server/compatibility_test.go index 34ffccfd2..7fe2d6c5e 100644 --- a/client/ssh/server/compatibility_test.go +++ b/client/ssh/server/compatibility_test.go @@ -4,12 +4,15 @@ import ( "context" "crypto/ed25519" "crypto/rand" + "errors" "fmt" "io" "net" "os" "os/exec" + "path/filepath" "runtime" + "slices" "strings" "testing" "time" @@ -23,25 +26,67 @@ import ( "github.com/netbirdio/netbird/client/ssh/testutil" ) -// TestMain handles package-level setup and cleanup func TestMain(m *testing.M) { - // Guard against infinite recursion when test binary is called as "netbird ssh exec" - // This happens when running tests as non-privileged user with fallback + // On platforms where su doesn't support --pty (macOS, FreeBSD, Windows), the SSH server + // spawns an executor subprocess via os.Executable(). During tests, this invokes the test + // binary with "ssh exec" args. We handle that here to properly execute commands and + // propagate exit codes. if len(os.Args) > 2 && os.Args[1] == "ssh" && os.Args[2] == "exec" { - // Just exit with error to break the recursion - fmt.Fprintf(os.Stderr, "Test binary called as 'ssh exec' - preventing infinite recursion\n") - os.Exit(1) + runTestExecutor() + return } - // Run tests code := m.Run() - - // Cleanup any created test users testutil.CleanupTestUsers() - os.Exit(code) } +// runTestExecutor emulates the netbird executor for tests. +// Parses --shell and --cmd args, runs the command, and exits with the correct code. +func runTestExecutor() { + if os.Getenv("_NETBIRD_TEST_EXECUTOR") != "" { + fmt.Fprintf(os.Stderr, "executor recursion detected\n") + os.Exit(1) + } + os.Setenv("_NETBIRD_TEST_EXECUTOR", "1") + + shell := "/bin/sh" + var command string + for i := 3; i < len(os.Args); i++ { + switch os.Args[i] { + case "--shell": + if i+1 < len(os.Args) { + shell = os.Args[i+1] + i++ + } + case "--cmd": + if i+1 < len(os.Args) { + command = os.Args[i+1] + i++ + } + } + } + + var cmd *exec.Cmd + if command == "" { + cmd = exec.Command(shell) + } else { + cmd = exec.Command(shell, "-c", command) + } + cmd.Args[0] = "-" + filepath.Base(shell) + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + if err := cmd.Run(); err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + os.Exit(exitErr.ExitCode()) + } + os.Exit(1) + } + os.Exit(0) +} + // TestSSHServerCompatibility tests that our SSH server is compatible with the system SSH client func TestSSHServerCompatibility(t *testing.T) { if testing.Short() { @@ -405,6 +450,171 @@ func createTempKeyFile(t *testing.T, privateKey []byte) (string, func()) { return createTempKeyFileFromBytes(t, privateKey) } +// TestSSHPtyModes tests different PTY allocation modes (-T, -t, -tt flags) +// This ensures our implementation matches OpenSSH behavior for: +// - ssh host command (no PTY - default when no TTY) +// - ssh -T host command (explicit no PTY) +// - ssh -t host command (force PTY) +// - ssh -T host (no PTY shell - our implementation) +func TestSSHPtyModes(t *testing.T) { + if testing.Short() { + t.Skip("Skipping SSH PTY mode tests in short mode") + } + + if !isSSHClientAvailable() { + t.Skip("SSH client not available on this system") + } + + if runtime.GOOS == "windows" && testutil.IsCI() { + t.Skip("Skipping Windows SSH PTY tests in CI due to S4U authentication issues") + } + + hostKey, err := nbssh.GeneratePrivateKey(nbssh.ED25519) + require.NoError(t, err) + + clientPrivKeyOpenSSH, _, err := generateOpenSSHKey(t) + require.NoError(t, err) + + serverConfig := &Config{ + HostKeyPEM: hostKey, + JWT: nil, + } + server := New(serverConfig) + server.SetAllowRootLogin(true) + + serverAddr := StartTestServer(t, server) + defer func() { + err := server.Stop() + require.NoError(t, err) + }() + + clientKeyFile, cleanupKey := createTempKeyFileFromBytes(t, clientPrivKeyOpenSSH) + defer cleanupKey() + + host, portStr, err := net.SplitHostPort(serverAddr) + require.NoError(t, err) + + username := testutil.GetTestUsername(t) + + baseArgs := []string{ + "-i", clientKeyFile, + "-p", portStr, + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + "-o", "ConnectTimeout=5", + "-o", "BatchMode=yes", + } + + t.Run("command_default_no_pty", func(t *testing.T) { + args := append(slices.Clone(baseArgs), fmt.Sprintf("%s@%s", username, host), "echo", "no_pty_default") + cmd := exec.Command("ssh", args...) + + output, err := cmd.CombinedOutput() + require.NoError(t, err, "Command (default no PTY) failed: %s", output) + assert.Contains(t, string(output), "no_pty_default") + }) + + t.Run("command_explicit_no_pty", func(t *testing.T) { + args := append(slices.Clone(baseArgs), "-T", fmt.Sprintf("%s@%s", username, host), "echo", "explicit_no_pty") + cmd := exec.Command("ssh", args...) + + output, err := cmd.CombinedOutput() + require.NoError(t, err, "Command (-T explicit no PTY) failed: %s", output) + assert.Contains(t, string(output), "explicit_no_pty") + }) + + t.Run("command_force_pty", func(t *testing.T) { + args := append(slices.Clone(baseArgs), "-tt", fmt.Sprintf("%s@%s", username, host), "echo", "force_pty") + cmd := exec.Command("ssh", args...) + + output, err := cmd.CombinedOutput() + require.NoError(t, err, "Command (-tt force PTY) failed: %s", output) + assert.Contains(t, string(output), "force_pty") + }) + + t.Run("shell_explicit_no_pty", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + args := append(slices.Clone(baseArgs), "-T", fmt.Sprintf("%s@%s", username, host)) + cmd := exec.CommandContext(ctx, "ssh", args...) + + stdin, err := cmd.StdinPipe() + require.NoError(t, err) + + stdout, err := cmd.StdoutPipe() + require.NoError(t, err) + + require.NoError(t, cmd.Start(), "Shell (-T no PTY) start failed") + + go func() { + defer stdin.Close() + time.Sleep(100 * time.Millisecond) + _, err := stdin.Write([]byte("echo shell_no_pty_test\n")) + assert.NoError(t, err, "write echo command") + time.Sleep(100 * time.Millisecond) + _, err = stdin.Write([]byte("exit 0\n")) + assert.NoError(t, err, "write exit command") + }() + + output, _ := io.ReadAll(stdout) + err = cmd.Wait() + + require.NoError(t, err, "Shell (-T no PTY) failed: %s", output) + assert.Contains(t, string(output), "shell_no_pty_test") + }) + + t.Run("exit_code_preserved_no_pty", func(t *testing.T) { + args := append(slices.Clone(baseArgs), "-T", fmt.Sprintf("%s@%s", username, host), "exit", "42") + cmd := exec.Command("ssh", args...) + + err := cmd.Run() + require.Error(t, err, "Command should exit with non-zero") + + var exitErr *exec.ExitError + require.True(t, errors.As(err, &exitErr), "Should be an exit error: %v", err) + assert.Equal(t, 42, exitErr.ExitCode(), "Exit code should be preserved with -T") + }) + + t.Run("exit_code_preserved_with_pty", func(t *testing.T) { + args := append(slices.Clone(baseArgs), "-tt", fmt.Sprintf("%s@%s", username, host), "sh -c 'exit 43'") + cmd := exec.Command("ssh", args...) + + err := cmd.Run() + require.Error(t, err, "PTY command should exit with non-zero") + + var exitErr *exec.ExitError + require.True(t, errors.As(err, &exitErr), "Should be an exit error: %v", err) + assert.Equal(t, 43, exitErr.ExitCode(), "Exit code should be preserved with -tt") + }) + + t.Run("stderr_works_no_pty", func(t *testing.T) { + args := append(slices.Clone(baseArgs), "-T", fmt.Sprintf("%s@%s", username, host), + "sh -c 'echo stdout_msg; echo stderr_msg >&2'") + cmd := exec.Command("ssh", args...) + + var stdout, stderr strings.Builder + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + require.NoError(t, cmd.Run(), "stderr test failed") + assert.Contains(t, stdout.String(), "stdout_msg", "stdout should have stdout_msg") + assert.Contains(t, stderr.String(), "stderr_msg", "stderr should have stderr_msg") + assert.NotContains(t, stdout.String(), "stderr_msg", "stdout should NOT have stderr_msg") + }) + + t.Run("stderr_merged_with_pty", func(t *testing.T) { + args := append(slices.Clone(baseArgs), "-tt", fmt.Sprintf("%s@%s", username, host), + "sh -c 'echo stdout_msg; echo stderr_msg >&2'") + cmd := exec.Command("ssh", args...) + + output, err := cmd.CombinedOutput() + require.NoError(t, err, "PTY stderr test failed: %s", output) + assert.Contains(t, string(output), "stdout_msg") + assert.Contains(t, string(output), "stderr_msg") + }) +} + // TestSSHServerFeatureCompatibility tests specific SSH features for compatibility func TestSSHServerFeatureCompatibility(t *testing.T) { if testing.Short() { diff --git a/client/ssh/server/executor_unix.go b/client/ssh/server/executor_unix.go index 8adc824ef..ee0b0ff78 100644 --- a/client/ssh/server/executor_unix.go +++ b/client/ssh/server/executor_unix.go @@ -8,6 +8,7 @@ import ( "fmt" "os" "os/exec" + "path/filepath" "runtime" "strings" "syscall" @@ -35,11 +36,35 @@ type ExecutorConfig struct { } // PrivilegeDropper handles secure privilege dropping in child processes -type PrivilegeDropper struct{} +type PrivilegeDropper struct { + logger *log.Entry +} + +// PrivilegeDropperOption is a functional option for configuring PrivilegeDropper +type PrivilegeDropperOption func(*PrivilegeDropper) // NewPrivilegeDropper creates a new privilege dropper -func NewPrivilegeDropper() *PrivilegeDropper { - return &PrivilegeDropper{} +func NewPrivilegeDropper(opts ...PrivilegeDropperOption) *PrivilegeDropper { + pd := &PrivilegeDropper{} + for _, opt := range opts { + opt(pd) + } + return pd +} + +// WithLogger sets the logger for the PrivilegeDropper +func WithLogger(logger *log.Entry) PrivilegeDropperOption { + return func(pd *PrivilegeDropper) { + pd.logger = logger + } +} + +// log returns the logger, falling back to standard logger if none set +func (pd *PrivilegeDropper) log() *log.Entry { + if pd.logger != nil { + return pd.logger + } + return log.NewEntry(log.StandardLogger()) } // CreateExecutorCommand creates a command that spawns netbird ssh exec for privilege dropping @@ -83,7 +108,7 @@ func (pd *PrivilegeDropper) CreateExecutorCommand(ctx context.Context, config Ex break } } - log.Tracef("creating executor command: %s %v", netbirdPath, safeArgs) + pd.log().Tracef("creating executor command: %s %v", netbirdPath, safeArgs) return exec.CommandContext(ctx, netbirdPath, args...), nil } @@ -206,17 +231,22 @@ func (pd *PrivilegeDropper) ExecuteWithPrivilegeDrop(ctx context.Context, config var execCmd *exec.Cmd if config.Command == "" { - os.Exit(ExitCodeSuccess) + execCmd = exec.CommandContext(ctx, config.Shell) + } else { + execCmd = exec.CommandContext(ctx, config.Shell, "-c", config.Command) } - - execCmd = exec.CommandContext(ctx, config.Shell, "-c", config.Command) + execCmd.Args[0] = "-" + filepath.Base(config.Shell) execCmd.Stdin = os.Stdin execCmd.Stdout = os.Stdout execCmd.Stderr = os.Stderr - cmdParts := strings.Fields(config.Command) - safeCmd := safeLogCommand(cmdParts) - log.Tracef("executing %s -c %s", execCmd.Path, safeCmd) + if config.Command == "" { + log.Tracef("executing login shell: %s", execCmd.Path) + } else { + cmdParts := strings.Fields(config.Command) + safeCmd := safeLogCommand(cmdParts) + log.Tracef("executing %s -c %s", execCmd.Path, safeCmd) + } if err := execCmd.Run(); err != nil { var exitError *exec.ExitError if errors.As(err, &exitError) { diff --git a/client/ssh/server/executor_windows.go b/client/ssh/server/executor_windows.go index d3504e056..51c995ec3 100644 --- a/client/ssh/server/executor_windows.go +++ b/client/ssh/server/executor_windows.go @@ -28,22 +28,45 @@ const ( ) type WindowsExecutorConfig struct { - Username string - Domain string - WorkingDir string - Shell string - Command string - Args []string - Interactive bool - Pty bool - PtyWidth int - PtyHeight int + Username string + Domain string + WorkingDir string + Shell string + Command string + Args []string + Pty bool + PtyWidth int + PtyHeight int } -type PrivilegeDropper struct{} +type PrivilegeDropper struct { + logger *log.Entry +} -func NewPrivilegeDropper() *PrivilegeDropper { - return &PrivilegeDropper{} +// PrivilegeDropperOption is a functional option for configuring PrivilegeDropper +type PrivilegeDropperOption func(*PrivilegeDropper) + +func NewPrivilegeDropper(opts ...PrivilegeDropperOption) *PrivilegeDropper { + pd := &PrivilegeDropper{} + for _, opt := range opts { + opt(pd) + } + return pd +} + +// WithLogger sets the logger for the PrivilegeDropper +func WithLogger(logger *log.Entry) PrivilegeDropperOption { + return func(pd *PrivilegeDropper) { + pd.logger = logger + } +} + +// log returns the logger, falling back to standard logger if none set +func (pd *PrivilegeDropper) log() *log.Entry { + if pd.logger != nil { + return pd.logger + } + return log.NewEntry(log.StandardLogger()) } var ( @@ -56,7 +79,6 @@ const ( // Common error messages commandFlag = "-Command" - closeTokenErrorMsg = "close token error: %v" // #nosec G101 -- This is an error message template, not credentials convertUsernameError = "convert username to UTF16: %w" convertDomainError = "convert domain to UTF16: %w" ) @@ -80,7 +102,7 @@ func (pd *PrivilegeDropper) CreateWindowsExecutorCommand(ctx context.Context, co shellArgs = []string{shell} } - log.Tracef("creating Windows direct shell command: %s %v", shellArgs[0], shellArgs) + pd.log().Tracef("creating Windows direct shell command: %s %v", shellArgs[0], shellArgs) cmd, token, err := pd.CreateWindowsProcessAsUser( ctx, shellArgs[0], shellArgs, config.Username, config.Domain, config.WorkingDir) @@ -180,10 +202,10 @@ func newLsaString(s string) lsaString { // generateS4UUserToken creates a Windows token using S4U authentication // This is the exact approach OpenSSH for Windows uses for public key authentication -func generateS4UUserToken(username, domain string) (windows.Handle, error) { +func generateS4UUserToken(logger *log.Entry, username, domain string) (windows.Handle, error) { userCpn := buildUserCpn(username, domain) - pd := NewPrivilegeDropper() + pd := NewPrivilegeDropper(WithLogger(logger)) isDomainUser := !pd.isLocalUser(domain) lsaHandle, err := initializeLsaConnection() @@ -197,12 +219,12 @@ func generateS4UUserToken(username, domain string) (windows.Handle, error) { return 0, err } - logonInfo, logonInfoSize, err := prepareS4ULogonStructure(username, domain, isDomainUser) + logonInfo, logonInfoSize, err := prepareS4ULogonStructure(logger, username, domain, isDomainUser) if err != nil { return 0, err } - return performS4ULogon(lsaHandle, authPackageId, logonInfo, logonInfoSize, userCpn, isDomainUser) + return performS4ULogon(logger, lsaHandle, authPackageId, logonInfo, logonInfoSize, userCpn, isDomainUser) } // buildUserCpn constructs the user principal name @@ -310,21 +332,21 @@ func lookupPrincipalName(username, domain string) (string, error) { } // prepareS4ULogonStructure creates the appropriate S4U logon structure -func prepareS4ULogonStructure(username, domain string, isDomainUser bool) (unsafe.Pointer, uintptr, error) { +func prepareS4ULogonStructure(logger *log.Entry, username, domain string, isDomainUser bool) (unsafe.Pointer, uintptr, error) { if isDomainUser { - return prepareDomainS4ULogon(username, domain) + return prepareDomainS4ULogon(logger, username, domain) } - return prepareLocalS4ULogon(username) + return prepareLocalS4ULogon(logger, username) } // prepareDomainS4ULogon creates S4U logon structure for domain users -func prepareDomainS4ULogon(username, domain string) (unsafe.Pointer, uintptr, error) { +func prepareDomainS4ULogon(logger *log.Entry, username, domain string) (unsafe.Pointer, uintptr, error) { upn, err := lookupPrincipalName(username, domain) if err != nil { return nil, 0, fmt.Errorf("lookup principal name: %w", err) } - log.Debugf("using KerbS4ULogon for domain user with UPN: %s", upn) + logger.Debugf("using KerbS4ULogon for domain user with UPN: %s", upn) upnUtf16, err := windows.UTF16FromString(upn) if err != nil { @@ -357,8 +379,8 @@ func prepareDomainS4ULogon(username, domain string) (unsafe.Pointer, uintptr, er } // prepareLocalS4ULogon creates S4U logon structure for local users -func prepareLocalS4ULogon(username string) (unsafe.Pointer, uintptr, error) { - log.Debugf("using Msv1_0S4ULogon for local user: %s", username) +func prepareLocalS4ULogon(logger *log.Entry, username string) (unsafe.Pointer, uintptr, error) { + logger.Debugf("using Msv1_0S4ULogon for local user: %s", username) usernameUtf16, err := windows.UTF16FromString(username) if err != nil { @@ -406,11 +428,11 @@ func prepareLocalS4ULogon(username string) (unsafe.Pointer, uintptr, error) { } // performS4ULogon executes the S4U logon operation -func performS4ULogon(lsaHandle windows.Handle, authPackageId uint32, logonInfo unsafe.Pointer, logonInfoSize uintptr, userCpn string, isDomainUser bool) (windows.Handle, error) { +func performS4ULogon(logger *log.Entry, lsaHandle windows.Handle, authPackageId uint32, logonInfo unsafe.Pointer, logonInfoSize uintptr, userCpn string, isDomainUser bool) (windows.Handle, error) { var tokenSource tokenSource copy(tokenSource.SourceName[:], "netbird") if ret, _, _ := procAllocateLocallyUniqueId.Call(uintptr(unsafe.Pointer(&tokenSource.SourceIdentifier))); ret == 0 { - log.Debugf("AllocateLocallyUniqueId failed") + logger.Debugf("AllocateLocallyUniqueId failed") } originName := newLsaString("netbird") @@ -441,7 +463,7 @@ func performS4ULogon(lsaHandle windows.Handle, authPackageId uint32, logonInfo u if profile != 0 { if ret, _, _ := procLsaFreeReturnBuffer.Call(profile); ret != StatusSuccess { - log.Debugf("LsaFreeReturnBuffer failed: 0x%x", ret) + logger.Debugf("LsaFreeReturnBuffer failed: 0x%x", ret) } } @@ -449,7 +471,7 @@ func performS4ULogon(lsaHandle windows.Handle, authPackageId uint32, logonInfo u return 0, fmt.Errorf("LsaLogonUser S4U for %s: NTSTATUS=0x%x, SubStatus=0x%x", userCpn, ret, subStatus) } - log.Debugf("created S4U %s token for user %s", + logger.Debugf("created S4U %s token for user %s", map[bool]string{true: "domain", false: "local"}[isDomainUser], userCpn) return token, nil } @@ -497,8 +519,8 @@ func (pd *PrivilegeDropper) isLocalUser(domain string) bool { // authenticateLocalUser handles authentication for local users func (pd *PrivilegeDropper) authenticateLocalUser(username, fullUsername string) (windows.Handle, error) { - log.Debugf("using S4U authentication for local user %s", fullUsername) - token, err := generateS4UUserToken(username, ".") + pd.log().Debugf("using S4U authentication for local user %s", fullUsername) + token, err := generateS4UUserToken(pd.log(), username, ".") if err != nil { return 0, fmt.Errorf("S4U authentication for local user %s: %w", fullUsername, err) } @@ -507,12 +529,12 @@ func (pd *PrivilegeDropper) authenticateLocalUser(username, fullUsername string) // authenticateDomainUser handles authentication for domain users func (pd *PrivilegeDropper) authenticateDomainUser(username, domain, fullUsername string) (windows.Handle, error) { - log.Debugf("using S4U authentication for domain user %s", fullUsername) - token, err := generateS4UUserToken(username, domain) + pd.log().Debugf("using S4U authentication for domain user %s", fullUsername) + token, err := generateS4UUserToken(pd.log(), username, domain) if err != nil { return 0, fmt.Errorf("S4U authentication for domain user %s: %w", fullUsername, err) } - log.Debugf("Successfully created S4U token for domain user %s", fullUsername) + pd.log().Debugf("successfully created S4U token for domain user %s", fullUsername) return token, nil } @@ -526,7 +548,7 @@ func (pd *PrivilegeDropper) CreateWindowsProcessAsUser(ctx context.Context, exec defer func() { if err := windows.CloseHandle(token); err != nil { - log.Debugf("close impersonation token: %v", err) + pd.log().Debugf("close impersonation token: %v", err) } }() @@ -564,7 +586,7 @@ func (pd *PrivilegeDropper) createProcessWithToken(ctx context.Context, sourceTo return cmd, primaryToken, nil } -// createSuCommand creates a command using su -l -c for privilege switching (Windows stub) -func (s *Server) createSuCommand(ssh.Session, *user.User, bool) (*exec.Cmd, error) { +// createSuCommand creates a command using su - for privilege switching (Windows stub). +func (s *Server) createSuCommand(*log.Entry, ssh.Session, *user.User, bool) (*exec.Cmd, error) { return nil, fmt.Errorf("su command not available on Windows") } diff --git a/client/ssh/server/jwt_test.go b/client/ssh/server/jwt_test.go index dbef011ac..b2f3ac6a0 100644 --- a/client/ssh/server/jwt_test.go +++ b/client/ssh/server/jwt_test.go @@ -54,7 +54,7 @@ func TestJWTEnforcement(t *testing.T) { server.SetAllowRootLogin(true) serverAddr := StartTestServer(t, server) - defer require.NoError(t, server.Stop()) + defer func() { require.NoError(t, server.Stop()) }() host, portStr, err := net.SplitHostPort(serverAddr) require.NoError(t, err) @@ -88,7 +88,7 @@ func TestJWTEnforcement(t *testing.T) { serverNoJWT.SetAllowRootLogin(true) serverAddrNoJWT := StartTestServer(t, serverNoJWT) - defer require.NoError(t, serverNoJWT.Stop()) + defer func() { require.NoError(t, serverNoJWT.Stop()) }() hostNoJWT, portStrNoJWT, err := net.SplitHostPort(serverAddrNoJWT) require.NoError(t, err) @@ -213,7 +213,7 @@ func TestJWTDetection(t *testing.T) { server.SetAllowRootLogin(true) serverAddr := StartTestServer(t, server) - defer require.NoError(t, server.Stop()) + defer func() { require.NoError(t, server.Stop()) }() host, portStr, err := net.SplitHostPort(serverAddr) require.NoError(t, err) @@ -341,7 +341,7 @@ func TestJWTFailClose(t *testing.T) { server.SetAllowRootLogin(true) serverAddr := StartTestServer(t, server) - defer require.NoError(t, server.Stop()) + defer func() { require.NoError(t, server.Stop()) }() host, portStr, err := net.SplitHostPort(serverAddr) require.NoError(t, err) @@ -596,7 +596,7 @@ func TestJWTAuthentication(t *testing.T) { server.UpdateSSHAuth(authConfig) serverAddr := StartTestServer(t, server) - defer require.NoError(t, server.Stop()) + defer func() { require.NoError(t, server.Stop()) }() host, portStr, err := net.SplitHostPort(serverAddr) require.NoError(t, err) @@ -715,7 +715,7 @@ func TestJWTMultipleAudiences(t *testing.T) { server.UpdateSSHAuth(authConfig) serverAddr := StartTestServer(t, server) - defer require.NoError(t, server.Stop()) + defer func() { require.NoError(t, server.Stop()) }() host, portStr, err := net.SplitHostPort(serverAddr) require.NoError(t, err) diff --git a/client/ssh/server/port_forwarding.go b/client/ssh/server/port_forwarding.go index c60cf4f58..e16ff5d46 100644 --- a/client/ssh/server/port_forwarding.go +++ b/client/ssh/server/port_forwarding.go @@ -271,13 +271,6 @@ func (s *Server) isRemotePortForwardingAllowed() bool { return s.allowRemotePortForwarding } -// isPortForwardingEnabled checks if any port forwarding (local or remote) is enabled -func (s *Server) isPortForwardingEnabled() bool { - s.mu.RLock() - defer s.mu.RUnlock() - return s.allowLocalPortForwarding || s.allowRemotePortForwarding -} - // parseTcpipForwardRequest parses the SSH request payload func (s *Server) parseTcpipForwardRequest(req *cryptossh.Request) (*tcpipForwardMsg, error) { var payload tcpipForwardMsg diff --git a/client/ssh/server/server.go b/client/ssh/server/server.go index a663614f4..1ddb60f8e 100644 --- a/client/ssh/server/server.go +++ b/client/ssh/server/server.go @@ -335,7 +335,7 @@ func (s *Server) GetStatus() (enabled bool, sessions []SessionInfo) { sessions = append(sessions, info) } - // Add authenticated connections without sessions (e.g., -N/-T or port-forwarding only) + // Add authenticated connections without sessions (e.g., -N or port-forwarding only) for key, connState := range s.connections { remoteAddr := string(key) if reportedAddrs[remoteAddr] { @@ -440,12 +440,8 @@ func (s *Server) ensureJWTValidator() error { ) // Use custom userIDClaim from authorizer if available - audience := "" - if len(config.Audiences) != 0 { - audience = config.Audiences[0] - } extractorOptions := []jwt.ClaimsExtractorOption{ - jwt.WithAudience(audience), + jwt.WithAudience(config.Audiences[0]), } if authorizer.GetUserIDClaim() != "" { extractorOptions = append(extractorOptions, jwt.WithUserIDClaim(authorizer.GetUserIDClaim())) diff --git a/client/ssh/server/server_config_test.go b/client/ssh/server/server_config_test.go index d85d85a51..f70e29963 100644 --- a/client/ssh/server/server_config_test.go +++ b/client/ssh/server/server_config_test.go @@ -483,12 +483,11 @@ func TestServer_IsPrivilegedUser(t *testing.T) { } } -func TestServer_PortForwardingOnlySession(t *testing.T) { - // Test that sessions without PTY and command are allowed when port forwarding is enabled +func TestServer_NonPtyShellSession(t *testing.T) { + // Test that non-PTY shell sessions (ssh -T) work regardless of port forwarding settings. currentUser, err := user.Current() require.NoError(t, err, "Should be able to get current user") - // Generate host key for server hostKey, err := ssh.GeneratePrivateKey(ssh.ED25519) require.NoError(t, err) @@ -496,36 +495,26 @@ func TestServer_PortForwardingOnlySession(t *testing.T) { name string allowLocalForwarding bool allowRemoteForwarding bool - expectAllowed bool - description string }{ { - name: "session_allowed_with_local_forwarding", + name: "shell_with_local_forwarding_enabled", allowLocalForwarding: true, allowRemoteForwarding: false, - expectAllowed: true, - description: "Port-forwarding-only session should be allowed when local forwarding is enabled", }, { - name: "session_allowed_with_remote_forwarding", + name: "shell_with_remote_forwarding_enabled", allowLocalForwarding: false, allowRemoteForwarding: true, - expectAllowed: true, - description: "Port-forwarding-only session should be allowed when remote forwarding is enabled", }, { - name: "session_allowed_with_both", + name: "shell_with_both_forwarding_enabled", allowLocalForwarding: true, allowRemoteForwarding: true, - expectAllowed: true, - description: "Port-forwarding-only session should be allowed when both forwarding types enabled", }, { - name: "session_denied_without_forwarding", + name: "shell_with_forwarding_disabled", allowLocalForwarding: false, allowRemoteForwarding: false, - expectAllowed: false, - description: "Port-forwarding-only session should be denied when all forwarding is disabled", }, } @@ -545,7 +534,6 @@ func TestServer_PortForwardingOnlySession(t *testing.T) { _ = server.Stop() }() - // Connect to the server without requesting PTY or command ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() @@ -557,20 +545,10 @@ func TestServer_PortForwardingOnlySession(t *testing.T) { _ = client.Close() }() - // Execute a command without PTY - this simulates ssh -T with no command - // The server should either allow it (port forwarding enabled) or reject it - output, err := client.ExecuteCommand(ctx, "") - if tt.expectAllowed { - // When allowed, the session stays open until cancelled - // ExecuteCommand with empty command should return without error - assert.NoError(t, err, "Session should be allowed when port forwarding is enabled") - assert.NotContains(t, output, "port forwarding is disabled", - "Output should not contain port forwarding disabled message") - } else if err != nil { - // When denied, we expect an error message about port forwarding being disabled - assert.Contains(t, err.Error(), "port forwarding is disabled", - "Should get port forwarding disabled message") - } + // Execute without PTY and no command - simulates ssh -T (shell without PTY) + // Should always succeed regardless of port forwarding settings + _, err = client.ExecuteCommand(ctx, "") + assert.NoError(t, err, "Non-PTY shell session should be allowed") }) } } diff --git a/client/ssh/server/server_test.go b/client/ssh/server/server_test.go index 661068539..89fab717f 100644 --- a/client/ssh/server/server_test.go +++ b/client/ssh/server/server_test.go @@ -405,12 +405,14 @@ func TestSSHServer_WindowsShellHandling(t *testing.T) { assert.Equal(t, "-Command", args[1]) assert.Equal(t, "echo test", args[2]) } else { - // Test Unix shell behavior args := server.getShellCommandArgs("/bin/sh", "echo test") assert.Equal(t, "/bin/sh", args[0]) - assert.Equal(t, "-l", args[1]) - assert.Equal(t, "-c", args[2]) - assert.Equal(t, "echo test", args[3]) + assert.Equal(t, "-c", args[1]) + assert.Equal(t, "echo test", args[2]) + + args = server.getShellCommandArgs("/bin/sh", "") + assert.Equal(t, "/bin/sh", args[0]) + assert.Len(t, args, 1) } } diff --git a/client/ssh/server/session_handlers.go b/client/ssh/server/session_handlers.go index 3fd578064..f12a75961 100644 --- a/client/ssh/server/session_handlers.go +++ b/client/ssh/server/session_handlers.go @@ -62,54 +62,12 @@ func (s *Server) sessionHandler(session ssh.Session) { ptyReq, winCh, isPty := session.Pty() hasCommand := len(session.Command()) > 0 - switch { - case isPty && hasCommand: - // ssh -t - Pty command execution - s.handleCommand(logger, session, privilegeResult, winCh) - case isPty: - // ssh - Pty interactive session (login) - s.handlePty(logger, session, privilegeResult, ptyReq, winCh) - case hasCommand: - // ssh - non-Pty command execution - s.handleCommand(logger, session, privilegeResult, nil) - default: - // ssh -T (or ssh -N) - no PTY, no command - s.handleNonInteractiveSession(logger, session) - } -} - -// handleNonInteractiveSession handles sessions that have no PTY and no command. -// These are typically used for port forwarding (ssh -L/-R) or tunneling (ssh -N). -func (s *Server) handleNonInteractiveSession(logger *log.Entry, session ssh.Session) { - s.updateSessionType(session, cmdNonInteractive) - - if !s.isPortForwardingEnabled() { - if _, err := io.WriteString(session, "port forwarding is disabled on this server\n"); err != nil { - logger.Debugf(errWriteSession, err) - } - if err := session.Exit(1); err != nil { - logSessionExitError(logger, err) - } - logger.Infof("rejected non-interactive session: port forwarding disabled") - return - } - - <-session.Context().Done() - - if err := session.Exit(0); err != nil { - logSessionExitError(logger, err) - } -} - -func (s *Server) updateSessionType(session ssh.Session, sessionType string) { - s.mu.Lock() - defer s.mu.Unlock() - - for _, state := range s.sessions { - if state.session == session { - state.sessionType = sessionType - return - } + if isPty && !hasCommand { + // ssh - PTY interactive session (login) + s.handlePtyLogin(logger, session, privilegeResult, ptyReq, winCh) + } else { + // ssh , ssh -t , ssh -T - command or shell execution + s.handleExecution(logger, session, privilegeResult, ptyReq, winCh) } } diff --git a/client/ssh/server/session_handlers_js.go b/client/ssh/server/session_handlers_js.go index c35e4da0b..4a6cf3d92 100644 --- a/client/ssh/server/session_handlers_js.go +++ b/client/ssh/server/session_handlers_js.go @@ -9,8 +9,8 @@ import ( log "github.com/sirupsen/logrus" ) -// handlePty is not supported on JS/WASM -func (s *Server) handlePty(logger *log.Entry, session ssh.Session, _ PrivilegeCheckResult, _ ssh.Pty, _ <-chan ssh.Window) bool { +// handlePtyLogin is not supported on JS/WASM +func (s *Server) handlePtyLogin(logger *log.Entry, session ssh.Session, _ PrivilegeCheckResult, _ ssh.Pty, _ <-chan ssh.Window) bool { errorMsg := "PTY sessions are not supported on WASM/JS platform\n" if _, err := fmt.Fprint(session.Stderr(), errorMsg); err != nil { logger.Debugf(errWriteSession, err) diff --git a/client/ssh/server/test.go b/client/ssh/server/test.go index f8abd1752..454d3afa3 100644 --- a/client/ssh/server/test.go +++ b/client/ssh/server/test.go @@ -8,19 +8,18 @@ import ( "time" ) +// StartTestServer starts the SSH server and returns the address it's listening on. func StartTestServer(t *testing.T, server *Server) string { started := make(chan string, 1) errChan := make(chan error, 1) go func() { - // Use port 0 to let the OS assign a free port addrPort := netip.MustParseAddrPort("127.0.0.1:0") if err := server.Start(context.Background(), addrPort); err != nil { errChan <- err return } - // Get the actual listening address from the server actualAddr := server.Addr() if actualAddr == nil { errChan <- fmt.Errorf("server started but no listener address available") diff --git a/client/ssh/server/userswitching_unix.go b/client/ssh/server/userswitching_unix.go index bc1557419..d80b77042 100644 --- a/client/ssh/server/userswitching_unix.go +++ b/client/ssh/server/userswitching_unix.go @@ -181,8 +181,8 @@ func (s *Server) getSupplementaryGroups(username string) ([]uint32, error) { // createExecutorCommand creates a command that spawns netbird ssh exec for privilege dropping. // Returns the command and a cleanup function (no-op on Unix). -func (s *Server) createExecutorCommand(session ssh.Session, localUser *user.User, hasPty bool) (*exec.Cmd, func(), error) { - log.Debugf("creating executor command for user %s (Pty: %v)", localUser.Username, hasPty) +func (s *Server) createExecutorCommand(logger *log.Entry, session ssh.Session, localUser *user.User, hasPty bool) (*exec.Cmd, func(), error) { + logger.Debugf("creating executor command for user %s (Pty: %v)", localUser.Username, hasPty) if err := validateUsername(localUser.Username); err != nil { return nil, nil, fmt.Errorf("invalid username %q: %w", localUser.Username, err) @@ -192,7 +192,7 @@ func (s *Server) createExecutorCommand(session ssh.Session, localUser *user.User if err != nil { return nil, nil, fmt.Errorf("parse user credentials: %w", err) } - privilegeDropper := NewPrivilegeDropper() + privilegeDropper := NewPrivilegeDropper(WithLogger(logger)) config := ExecutorConfig{ UID: uid, GID: gid, @@ -233,7 +233,7 @@ func (s *Server) createDirectPtyCommand(session ssh.Session, localUser *user.Use shell := getUserShell(localUser.Uid) args := s.getShellCommandArgs(shell, session.RawCommand()) - cmd := exec.CommandContext(session.Context(), args[0], args[1:]...) + cmd := s.createShellCommand(session.Context(), shell, args) cmd.Dir = localUser.HomeDir cmd.Env = s.preparePtyEnv(localUser, ptyReq, session) diff --git a/client/ssh/server/userswitching_windows.go b/client/ssh/server/userswitching_windows.go index 5a5f75fa4..260e1301e 100644 --- a/client/ssh/server/userswitching_windows.go +++ b/client/ssh/server/userswitching_windows.go @@ -88,20 +88,20 @@ func validateUsernameFormat(username string) error { // createExecutorCommand creates a command using Windows executor for privilege dropping. // Returns the command and a cleanup function that must be called after starting the process. -func (s *Server) createExecutorCommand(session ssh.Session, localUser *user.User, hasPty bool) (*exec.Cmd, func(), error) { - log.Debugf("creating Windows executor command for user %s (Pty: %v)", localUser.Username, hasPty) +func (s *Server) createExecutorCommand(logger *log.Entry, session ssh.Session, localUser *user.User, hasPty bool) (*exec.Cmd, func(), error) { + logger.Debugf("creating Windows executor command for user %s (Pty: %v)", localUser.Username, hasPty) username, _ := s.parseUsername(localUser.Username) if err := validateUsername(username); err != nil { return nil, nil, fmt.Errorf("invalid username %q: %w", username, err) } - return s.createUserSwitchCommand(localUser, session, hasPty) + return s.createUserSwitchCommand(logger, session, localUser) } // createUserSwitchCommand creates a command with Windows user switching. // Returns the command and a cleanup function that must be called after starting the process. -func (s *Server) createUserSwitchCommand(localUser *user.User, session ssh.Session, interactive bool) (*exec.Cmd, func(), error) { +func (s *Server) createUserSwitchCommand(logger *log.Entry, session ssh.Session, localUser *user.User) (*exec.Cmd, func(), error) { username, domain := s.parseUsername(localUser.Username) shell := getUserShell(localUser.Uid) @@ -113,15 +113,14 @@ func (s *Server) createUserSwitchCommand(localUser *user.User, session ssh.Sessi } config := WindowsExecutorConfig{ - Username: username, - Domain: domain, - WorkingDir: localUser.HomeDir, - Shell: shell, - Command: command, - Interactive: interactive || (rawCmd == ""), + Username: username, + Domain: domain, + WorkingDir: localUser.HomeDir, + Shell: shell, + Command: command, } - dropper := NewPrivilegeDropper() + dropper := NewPrivilegeDropper(WithLogger(logger)) cmd, token, err := dropper.CreateWindowsExecutorCommand(session.Context(), config) if err != nil { return nil, nil, err @@ -130,7 +129,7 @@ func (s *Server) createUserSwitchCommand(localUser *user.User, session ssh.Sessi cleanup := func() { if token != 0 { if err := windows.CloseHandle(windows.Handle(token)); err != nil { - log.Debugf("close primary token: %v", err) + logger.Debugf("close primary token: %v", err) } } } diff --git a/client/ssh/server/winpty/conpty.go b/client/ssh/server/winpty/conpty.go index 0f3659ffe..c08ccfd05 100644 --- a/client/ssh/server/winpty/conpty.go +++ b/client/ssh/server/winpty/conpty.go @@ -56,7 +56,7 @@ var ( ) // ExecutePtyWithUserToken executes a command with ConPty using user token. -func ExecutePtyWithUserToken(ctx context.Context, session ssh.Session, ptyConfig PtyConfig, userConfig UserConfig) error { +func ExecutePtyWithUserToken(session ssh.Session, ptyConfig PtyConfig, userConfig UserConfig) error { args := buildShellArgs(ptyConfig.Shell, ptyConfig.Command) commandLine := buildCommandLine(args) @@ -64,7 +64,7 @@ func ExecutePtyWithUserToken(ctx context.Context, session ssh.Session, ptyConfig Pty: ptyConfig, User: userConfig, Session: session, - Context: ctx, + Context: session.Context(), } return executeConPtyWithConfig(commandLine, config) diff --git a/client/status/status.go b/client/status/status.go index 305797eee..f13163a41 100644 --- a/client/status/status.go +++ b/client/status/status.go @@ -11,8 +11,12 @@ import ( "strings" "time" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" "gopkg.in/yaml.v3" + "golang.org/x/exp/maps" + "github.com/netbirdio/netbird/client/anonymize" "github.com/netbirdio/netbird/client/internal/peer" probeRelay "github.com/netbirdio/netbird/client/internal/relay" @@ -116,9 +120,7 @@ type OutputOverview struct { SSHServerState SSHServerStateOutput `json:"sshServer" yaml:"sshServer"` } -func ConvertToStatusOutputOverview(resp *proto.StatusResponse, anon bool, statusFilter string, prefixNamesFilter []string, prefixNamesFilterMap map[string]struct{}, ipsFilter map[string]struct{}, connectionTypeFilter string, profName string) OutputOverview { - pbFullStatus := resp.GetFullStatus() - +func ConvertToStatusOutputOverview(pbFullStatus *proto.FullStatus, anon bool, daemonVersion string, statusFilter string, prefixNamesFilter []string, prefixNamesFilterMap map[string]struct{}, ipsFilter map[string]struct{}, connectionTypeFilter string, profName string) OutputOverview { managementState := pbFullStatus.GetManagementState() managementOverview := ManagementStateOutput{ URL: managementState.GetURL(), @@ -134,13 +136,13 @@ func ConvertToStatusOutputOverview(resp *proto.StatusResponse, anon bool, status } relayOverview := mapRelays(pbFullStatus.GetRelays()) - peersOverview := mapPeers(resp.GetFullStatus().GetPeers(), statusFilter, prefixNamesFilter, prefixNamesFilterMap, ipsFilter, connectionTypeFilter) sshServerOverview := mapSSHServer(pbFullStatus.GetSshServerState()) + peersOverview := mapPeers(pbFullStatus.GetPeers(), statusFilter, prefixNamesFilter, prefixNamesFilterMap, ipsFilter, connectionTypeFilter) overview := OutputOverview{ Peers: peersOverview, CliVersion: version.NetbirdVersion(), - DaemonVersion: resp.GetDaemonVersion(), + DaemonVersion: daemonVersion, ManagementState: managementOverview, SignalState: signalOverview, Relays: relayOverview, @@ -489,6 +491,11 @@ func (o *OutputOverview) GeneralSummary(showURL bool, showRelays bool, showNameS peersCountString := fmt.Sprintf("%d/%d Connected", o.Peers.Connected, o.Peers.Total) + var forwardingRulesString string + if o.NumberOfForwardingRules > 0 { + forwardingRulesString = fmt.Sprintf("Forwarding rules: %d\n", o.NumberOfForwardingRules) + } + goos := runtime.GOOS goarch := runtime.GOARCH goarm := "" @@ -512,7 +519,7 @@ func (o *OutputOverview) GeneralSummary(showURL bool, showRelays bool, showNameS "Lazy connection: %s\n"+ "SSH Server: %s\n"+ "Networks: %s\n"+ - "Forwarding rules: %d\n"+ + "%s"+ "Peers count: %s\n", fmt.Sprintf("%s/%s%s", goos, goarch, goarm), o.DaemonVersion, @@ -529,7 +536,7 @@ func (o *OutputOverview) GeneralSummary(showURL bool, showRelays bool, showNameS lazyConnectionEnabledStatus, sshServerStatus, networks, - o.NumberOfForwardingRules, + forwardingRulesString, peersCountString, ) return summary @@ -553,6 +560,94 @@ func (o *OutputOverview) FullDetailSummary() string { ) } +func ToProtoFullStatus(fullStatus peer.FullStatus) *proto.FullStatus { + pbFullStatus := proto.FullStatus{ + ManagementState: &proto.ManagementState{}, + SignalState: &proto.SignalState{}, + LocalPeerState: &proto.LocalPeerState{}, + Peers: []*proto.PeerState{}, + } + + pbFullStatus.ManagementState.URL = fullStatus.ManagementState.URL + pbFullStatus.ManagementState.Connected = fullStatus.ManagementState.Connected + if err := fullStatus.ManagementState.Error; err != nil { + pbFullStatus.ManagementState.Error = err.Error() + } + + pbFullStatus.SignalState.URL = fullStatus.SignalState.URL + pbFullStatus.SignalState.Connected = fullStatus.SignalState.Connected + if err := fullStatus.SignalState.Error; err != nil { + pbFullStatus.SignalState.Error = err.Error() + } + + pbFullStatus.LocalPeerState.IP = fullStatus.LocalPeerState.IP + pbFullStatus.LocalPeerState.PubKey = fullStatus.LocalPeerState.PubKey + pbFullStatus.LocalPeerState.KernelInterface = fullStatus.LocalPeerState.KernelInterface + pbFullStatus.LocalPeerState.Fqdn = fullStatus.LocalPeerState.FQDN + pbFullStatus.LocalPeerState.RosenpassPermissive = fullStatus.RosenpassState.Permissive + pbFullStatus.LocalPeerState.RosenpassEnabled = fullStatus.RosenpassState.Enabled + pbFullStatus.LocalPeerState.Networks = maps.Keys(fullStatus.LocalPeerState.Routes) + pbFullStatus.NumberOfForwardingRules = int32(fullStatus.NumOfForwardingRules) + pbFullStatus.LazyConnectionEnabled = fullStatus.LazyConnectionEnabled + + for _, peerState := range fullStatus.Peers { + pbPeerState := &proto.PeerState{ + IP: peerState.IP, + PubKey: peerState.PubKey, + ConnStatus: peerState.ConnStatus.String(), + ConnStatusUpdate: timestamppb.New(peerState.ConnStatusUpdate), + Relayed: peerState.Relayed, + LocalIceCandidateType: peerState.LocalIceCandidateType, + RemoteIceCandidateType: peerState.RemoteIceCandidateType, + LocalIceCandidateEndpoint: peerState.LocalIceCandidateEndpoint, + RemoteIceCandidateEndpoint: peerState.RemoteIceCandidateEndpoint, + RelayAddress: peerState.RelayServerAddress, + Fqdn: peerState.FQDN, + LastWireguardHandshake: timestamppb.New(peerState.LastWireguardHandshake), + BytesRx: peerState.BytesRx, + BytesTx: peerState.BytesTx, + RosenpassEnabled: peerState.RosenpassEnabled, + Networks: maps.Keys(peerState.GetRoutes()), + Latency: durationpb.New(peerState.Latency), + SshHostKey: peerState.SSHHostKey, + } + pbFullStatus.Peers = append(pbFullStatus.Peers, pbPeerState) + } + + for _, relayState := range fullStatus.Relays { + pbRelayState := &proto.RelayState{ + URI: relayState.URI, + Available: relayState.Err == nil, + } + if err := relayState.Err; err != nil { + pbRelayState.Error = err.Error() + } + pbFullStatus.Relays = append(pbFullStatus.Relays, pbRelayState) + } + + for _, dnsState := range fullStatus.NSGroupStates { + var err string + if dnsState.Error != nil { + err = dnsState.Error.Error() + } + + var servers []string + for _, server := range dnsState.Servers { + servers = append(servers, server.String()) + } + + pbDnsState := &proto.NSGroupState{ + Servers: servers, + Domains: dnsState.Domains, + Enabled: dnsState.Enabled, + Error: err, + } + pbFullStatus.DnsServers = append(pbFullStatus.DnsServers, pbDnsState) + } + + return &pbFullStatus +} + func parsePeers(peers PeersStateOutput, rosenpassEnabled, rosenpassPermissive bool) string { var ( peersString = "" diff --git a/client/status/status_test.go b/client/status/status_test.go index f4585827b..b02d78d64 100644 --- a/client/status/status_test.go +++ b/client/status/status_test.go @@ -238,7 +238,7 @@ var overview = OutputOverview{ } func TestConversionFromFullStatusToOutputOverview(t *testing.T) { - convertedResult := ConvertToStatusOutputOverview(resp, false, "", nil, nil, nil, "", "") + convertedResult := ConvertToStatusOutputOverview(resp.GetFullStatus(), false, resp.GetDaemonVersion(), "", nil, nil, nil, "", "") assert.Equal(t, overview, convertedResult) } @@ -567,7 +567,6 @@ Quantum resistance: false Lazy connection: false SSH Server: Disabled Networks: 10.10.0.0/24 -Forwarding rules: 0 Peers count: 2/2 Connected `, lastConnectionUpdate1, lastHandshake1, lastConnectionUpdate2, lastHandshake2, runtime.GOOS, runtime.GOARCH, overview.CliVersion) @@ -592,7 +591,6 @@ Quantum resistance: false Lazy connection: false SSH Server: Disabled Networks: 10.10.0.0/24 -Forwarding rules: 0 Peers count: 2/2 Connected ` diff --git a/client/ui/client_ui.go b/client/ui/client_ui.go index 5d955ed25..0290e17d5 100644 --- a/client/ui/client_ui.go +++ b/client/ui/client_ui.go @@ -1033,7 +1033,7 @@ func (s *serviceClient) onTrayReady() { s.mDown.Disable() systray.AddSeparator() - s.mSettings = systray.AddMenuItem("Settings", settingsMenuDescr) + s.mSettings = systray.AddMenuItem("Settings", disabledMenuDescr) s.mAllowSSH = s.mSettings.AddSubMenuItemCheckbox("Allow SSH", allowSSHMenuDescr, false) s.mAutoConnect = s.mSettings.AddSubMenuItemCheckbox("Connect on Startup", autoConnectMenuDescr, false) s.mEnableRosenpass = s.mSettings.AddSubMenuItemCheckbox("Enable Quantum-Resistance", quantumResistanceMenuDescr, false) @@ -1060,7 +1060,7 @@ func (s *serviceClient) onTrayReady() { } s.exitNodeMu.Lock() - s.mExitNode = systray.AddMenuItem("Exit Node", exitNodeMenuDescr) + s.mExitNode = systray.AddMenuItem("Exit Node", disabledMenuDescr) s.mExitNode.Disable() s.exitNodeMu.Unlock() @@ -1261,7 +1261,6 @@ func (s *serviceClient) setSettingsEnabled(enabled bool) { if s.mSettings != nil { if enabled { s.mSettings.Enable() - s.mSettings.SetTooltip(settingsMenuDescr) } else { s.mSettings.Hide() s.mSettings.SetTooltip("Settings are disabled by daemon") diff --git a/client/ui/const.go b/client/ui/const.go index 332282c17..48619be75 100644 --- a/client/ui/const.go +++ b/client/ui/const.go @@ -1,8 +1,6 @@ package main const ( - settingsMenuDescr = "Settings of the application" - profilesMenuDescr = "Manage your profiles" allowSSHMenuDescr = "Allow SSH connections" autoConnectMenuDescr = "Connect automatically when the service starts" quantumResistanceMenuDescr = "Enable post-quantum security via Rosenpass" @@ -11,7 +9,7 @@ const ( notificationsMenuDescr = "Enable notifications" advancedSettingsMenuDescr = "Advanced settings of the application" debugBundleMenuDescr = "Create and open debug information bundle" - exitNodeMenuDescr = "Select exit node for routing traffic" + disabledMenuDescr = "" networksMenuDescr = "Open the networks management window" latestVersionMenuDescr = "Download latest version" quitMenuDescr = "Quit the client app" diff --git a/client/ui/debug.go b/client/ui/debug.go index a057b2a85..29f73a66a 100644 --- a/client/ui/debug.go +++ b/client/ui/debug.go @@ -18,9 +18,7 @@ import ( "github.com/skratchdot/open-golang/open" "github.com/netbirdio/netbird/client/internal" - "github.com/netbirdio/netbird/client/internal/profilemanager" "github.com/netbirdio/netbird/client/proto" - nbstatus "github.com/netbirdio/netbird/client/status" uptypes "github.com/netbirdio/netbird/upload-server/types" ) @@ -291,19 +289,18 @@ func (s *serviceClient) handleRunForDuration( return } - statusOutput, err := s.collectDebugData(conn, initialState, params, progressUI) - if err != nil { + defer s.restoreServiceState(conn, initialState) + + if err := s.collectDebugData(conn, initialState, params, progressUI); err != nil { handleError(progressUI, err.Error()) return } - if err := s.createDebugBundleFromCollection(conn, params, statusOutput, progressUI); err != nil { + if err := s.createDebugBundleFromCollection(conn, params, progressUI); err != nil { handleError(progressUI, err.Error()) return } - s.restoreServiceState(conn, initialState) - progressUI.statusLabel.SetText("Bundle created successfully") } @@ -409,6 +406,10 @@ func (s *serviceClient) configureServiceForDebug( } time.Sleep(time.Second * 3) + if _, err := conn.StartCPUProfile(s.ctx, &proto.StartCPUProfileRequest{}); err != nil { + log.Warnf("failed to start CPU profiling: %v", err) + } + return nil } @@ -417,68 +418,37 @@ func (s *serviceClient) collectDebugData( state *debugInitialState, params *debugCollectionParams, progress *progressUI, -) (string, error) { +) error { ctx, cancel := context.WithTimeout(s.ctx, params.duration) defer cancel() var wg sync.WaitGroup startProgressTracker(ctx, &wg, params.duration, progress) if err := s.configureServiceForDebug(conn, state, params.enablePersistence); err != nil { - return "", err + return err } - pm := profilemanager.NewProfileManager() - var profName string - if activeProf, err := pm.GetActiveProfile(); err == nil { - profName = activeProf.Name - } - - postUpStatus, err := conn.Status(s.ctx, &proto.StatusRequest{GetFullPeerStatus: true}) - if err != nil { - log.Warnf("Failed to get post-up status: %v", err) - } - - var postUpStatusOutput string - if postUpStatus != nil { - overview := nbstatus.ConvertToStatusOutputOverview(postUpStatus, params.anonymize, "", nil, nil, nil, "", profName) - postUpStatusOutput = overview.FullDetailSummary() - } - headerPostUp := fmt.Sprintf("----- NetBird post-up - Timestamp: %s", time.Now().Format(time.RFC3339)) - statusOutput := fmt.Sprintf("%s\n%s", headerPostUp, postUpStatusOutput) - wg.Wait() progress.progressBar.Hide() progress.statusLabel.SetText("Collecting debug data...") - preDownStatus, err := conn.Status(s.ctx, &proto.StatusRequest{GetFullPeerStatus: true}) - if err != nil { - log.Warnf("Failed to get pre-down status: %v", err) + if _, err := conn.StopCPUProfile(s.ctx, &proto.StopCPUProfileRequest{}); err != nil { + log.Warnf("failed to stop CPU profiling: %v", err) } - var preDownStatusOutput string - if preDownStatus != nil { - overview := nbstatus.ConvertToStatusOutputOverview(preDownStatus, params.anonymize, "", nil, nil, nil, "", profName) - preDownStatusOutput = overview.FullDetailSummary() - } - headerPreDown := fmt.Sprintf("----- NetBird pre-down - Timestamp: %s - Duration: %s", - time.Now().Format(time.RFC3339), params.duration) - statusOutput = fmt.Sprintf("%s\n%s\n%s", statusOutput, headerPreDown, preDownStatusOutput) - - return statusOutput, nil + return nil } // Create the debug bundle with collected data func (s *serviceClient) createDebugBundleFromCollection( conn proto.DaemonServiceClient, params *debugCollectionParams, - statusOutput string, progress *progressUI, ) error { progress.statusLabel.SetText("Creating debug bundle with collected logs...") request := &proto.DebugBundleRequest{ Anonymize: params.anonymize, - Status: statusOutput, SystemInfo: params.systemInfo, } @@ -581,26 +551,8 @@ func (s *serviceClient) createDebugBundle(anonymize bool, systemInfo bool, uploa return nil, fmt.Errorf("get client: %v", err) } - pm := profilemanager.NewProfileManager() - var profName string - if activeProf, err := pm.GetActiveProfile(); err == nil { - profName = activeProf.Name - } - - statusResp, err := conn.Status(s.ctx, &proto.StatusRequest{GetFullPeerStatus: true}) - if err != nil { - log.Warnf("failed to get status for debug bundle: %v", err) - } - - var statusOutput string - if statusResp != nil { - overview := nbstatus.ConvertToStatusOutputOverview(statusResp, anonymize, "", nil, nil, nil, "", profName) - statusOutput = overview.FullDetailSummary() - } - request := &proto.DebugBundleRequest{ Anonymize: anonymize, - Status: statusOutput, SystemInfo: systemInfo, } diff --git a/client/ui/event_handler.go b/client/ui/event_handler.go index 9ffacd926..2216c8aeb 100644 --- a/client/ui/event_handler.go +++ b/client/ui/event_handler.go @@ -63,6 +63,8 @@ func (h *eventHandler) listen(ctx context.Context) { h.handleNetworksClick() case <-h.client.mNotifications.ClickedCh: h.handleNotificationsClick() + case <-systray.TrayOpenedCh: + h.client.updateExitNodes() } } } @@ -99,6 +101,8 @@ func (h *eventHandler) handleConnectClick() { func (h *eventHandler) handleDisconnectClick() { h.client.mDown.Disable() + h.client.exitNodeStates = []exitNodeState{} + if h.client.connectCancel != nil { log.Debugf("cancelling ongoing connect operation") h.client.connectCancel() diff --git a/client/ui/network.go b/client/ui/network.go index fb73efd7b..9a5ad7662 100644 --- a/client/ui/network.go +++ b/client/ui/network.go @@ -341,7 +341,6 @@ func (s *serviceClient) updateExitNodes() { log.Errorf("get client: %v", err) return } - exitNodes, err := s.getExitNodes(conn) if err != nil { log.Errorf("get exit nodes: %v", err) @@ -390,7 +389,7 @@ func (s *serviceClient) recreateExitNodeMenu(exitNodes []*proto.Network) { if runtime.GOOS == "linux" || runtime.GOOS == "freebsd" { s.mExitNode.Remove() - s.mExitNode = systray.AddMenuItem("Exit Node", exitNodeMenuDescr) + s.mExitNode = systray.AddMenuItem("Exit Node", disabledMenuDescr) } var showDeselectAll bool diff --git a/client/wasm/cmd/main.go b/client/wasm/cmd/main.go index 2647c2f0d..26022ffc7 100644 --- a/client/wasm/cmd/main.go +++ b/client/wasm/cmd/main.go @@ -12,7 +12,6 @@ import ( "google.golang.org/protobuf/encoding/protojson" netbird "github.com/netbirdio/netbird/client/embed" - "github.com/netbirdio/netbird/client/proto" sshdetection "github.com/netbirdio/netbird/client/ssh/detection" nbstatus "github.com/netbirdio/netbird/client/status" "github.com/netbirdio/netbird/client/wasm/internal/http" @@ -350,12 +349,8 @@ func getStatusOverview(client *netbird.Client) (nbstatus.OutputOverview, error) } pbFullStatus := fullStatus.ToProto() - statusResp := &proto.StatusResponse{ - DaemonVersion: version.NetbirdVersion(), - FullStatus: pbFullStatus, - } - return nbstatus.ConvertToStatusOutputOverview(statusResp, false, "", nil, nil, nil, "", ""), nil + return nbstatus.ConvertToStatusOutputOverview(pbFullStatus, false, version.NetbirdVersion(), "", nil, nil, nil, "", ""), nil } // createStatusMethod creates the status method that returns JSON diff --git a/go.mod b/go.mod index 4d4abe72f..c00a4abb9 100644 --- a/go.mod +++ b/go.mod @@ -31,7 +31,7 @@ require ( require ( fyne.io/fyne/v2 v2.7.0 - fyne.io/systray v1.11.1-0.20250603113521-ca66a66d8b58 + fyne.io/systray v1.12.1-0.20260116214250-81f8e1a496f9 github.com/TheJumpCloud/jcapi-go v3.0.0+incompatible github.com/awnumar/memguard v0.23.0 github.com/aws/aws-sdk-go-v2 v1.36.3 @@ -69,8 +69,9 @@ require ( github.com/mdlayher/socket v0.5.1 github.com/miekg/dns v1.1.59 github.com/mitchellh/hashstructure/v2 v2.0.2 - github.com/netbirdio/management-integrations/integrations v0.0.0-20251203183432-d5400f030847 + github.com/netbirdio/management-integrations/integrations v0.0.0-20260122111742-a6f99668844f github.com/netbirdio/signal-dispatcher/dispatcher v0.0.0-20250805121659-6b4ac470ca45 + github.com/oapi-codegen/runtime v1.1.2 github.com/okta/okta-sdk-golang/v2 v2.18.0 github.com/oschwald/maxminddb-golang v1.12.0 github.com/patrickmn/go-cache v2.1.0+incompatible @@ -142,6 +143,7 @@ require ( github.com/Microsoft/go-winio v0.6.2 // indirect github.com/Microsoft/hcsshim v0.12.3 // indirect github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be // indirect + github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect github.com/awnumar/memcall v0.4.0 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.17.67 // indirect diff --git a/go.sum b/go.sum index 0f39581ae..61f97b662 100644 --- a/go.sum +++ b/go.sum @@ -13,8 +13,8 @@ filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= fyne.io/fyne/v2 v2.7.0 h1:GvZSpE3X0liU/fqstInVvRsaboIVpIWQ4/sfjDGIGGQ= fyne.io/fyne/v2 v2.7.0/go.mod h1:xClVlrhxl7D+LT+BWYmcrW4Nf+dJTvkhnPgji7spAwE= -fyne.io/systray v1.11.1-0.20250603113521-ca66a66d8b58 h1:eA5/u2XRd8OUkoMqEv3IBlFYSruNlXD8bRHDiqm0VNI= -fyne.io/systray v1.11.1-0.20250603113521-ca66a66d8b58/go.mod h1:RVwqP9nYMo7h5zViCBHri2FgjXF7H2cub7MAq4NSoLs= +fyne.io/systray v1.12.1-0.20260116214250-81f8e1a496f9 h1:829+77I4TaMrcg9B3wf+gHhdSgoCVEgH2czlPXPbfj4= +fyne.io/systray v1.12.1-0.20260116214250-81f8e1a496f9/go.mod h1:RVwqP9nYMo7h5zViCBHri2FgjXF7H2cub7MAq4NSoLs= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/AppsFlyer/go-sundheit v0.6.0 h1:d2hBvCjBSb2lUsEWGfPigr4MCOt04sxB+Rppl0yUMSk= @@ -35,12 +35,15 @@ github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERo github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Microsoft/hcsshim v0.12.3 h1:LS9NXqXhMoqNCplK1ApmVSfB4UnVLRDWRapB6EIlxE0= github.com/Microsoft/hcsshim v0.12.3/go.mod h1:Iyl1WVpZzr+UkzjekHZbV8o5Z9ZkxNGx6CtY2Qg/JVQ= +github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= github.com/TheJumpCloud/jcapi-go v3.0.0+incompatible h1:hqcTK6ZISdip65SR792lwYJTa/axESA0889D3UlZbLo= github.com/TheJumpCloud/jcapi-go v3.0.0+incompatible/go.mod h1:6B1nuc1MUs6c62ODZDl7hVE5Pv7O2XGSkgg2olnq34I= github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e h1:4dAU9FXIyQktpoUAgOJK3OTFc/xug0PCXYCqU0FgDKI= github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= +github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= +github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= github.com/awnumar/memcall v0.4.0 h1:B7hgZYdfH6Ot1Goaz8jGne/7i8xD4taZie/PNSFZ29g= github.com/awnumar/memcall v0.4.0/go.mod h1:8xOx1YbfyuCg3Fy6TO8DK0kZUua3V42/goA5Ru47E8w= github.com/awnumar/memguard v0.23.0 h1:sJ3a1/SWlcuKIQ7MV+R9p0Pvo9CWsMbGZvcZQtmc68A= @@ -87,6 +90,7 @@ github.com/beevik/etree v1.6.0 h1:u8Kwy8pp9D9XeITj2Z0XtA5qqZEmtJtuXZRQi+j03eE= github.com/beevik/etree v1.6.0/go.mod h1:bh4zJxiIr62SOf9pRzN7UUYaEDa9HEKafK25+sLc0Gc= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= @@ -322,6 +326,7 @@ github.com/jonboulle/clockwork v0.5.0/go.mod h1:3mZlmanh0g2NDKO5TWZVJAfofYk64M7X github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jsummers/gobmp v0.0.0-20230614200233-a9de23ed2e25 h1:YLvr1eE6cdCqjOe972w/cYF+FjW34v27+9Vo5106B4M= github.com/jsummers/gobmp v0.0.0-20230614200233-a9de23ed2e25/go.mod h1:kLgvv7o6UM+0QSf0QjAse3wReFDsb9qbZJdfexWlrQw= +github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= @@ -403,8 +408,8 @@ github.com/netbirdio/go-netroute v0.0.0-20240611143515-f59b0e1d3944 h1:TDtJKmM6S github.com/netbirdio/go-netroute v0.0.0-20240611143515-f59b0e1d3944/go.mod h1:sHA6TRxjQ6RLbnI+3R4DZo2Eseg/iKiPRfNmcuNySVQ= github.com/netbirdio/ice/v4 v4.0.0-20250908184934-6202be846b51 h1:Ov4qdafATOgGMB1wbSuh+0aAHcwz9hdvB6VZjh1mVMI= github.com/netbirdio/ice/v4 v4.0.0-20250908184934-6202be846b51/go.mod h1:ZSIbPdBn5hePO8CpF1PekH2SfpTxg1PDhEwtbqZS7R8= -github.com/netbirdio/management-integrations/integrations v0.0.0-20251203183432-d5400f030847 h1:V0zsYYMU5d2UN1m9zOLPEZCGWpnhtkYcxQVi9Rrx3bY= -github.com/netbirdio/management-integrations/integrations v0.0.0-20251203183432-d5400f030847/go.mod h1:qzLCKeR253jtsWhfZTt4fyegI5zei32jKZykV+oSQOo= +github.com/netbirdio/management-integrations/integrations v0.0.0-20260122111742-a6f99668844f h1:CTBf0je/FpKr2lVSMZLak7m8aaWcS6ur4SOfhSSazFI= +github.com/netbirdio/management-integrations/integrations v0.0.0-20260122111742-a6f99668844f/go.mod h1:y7CxagMYzg9dgu+masRqYM7BQlOGA5Y8US85MCNFPlY= github.com/netbirdio/service v0.0.0-20240911161631-f62744f42502 h1:3tHlFmhTdX9axERMVN63dqyFqnvuD+EMJHzM7mNGON8= github.com/netbirdio/service v0.0.0-20240911161631-f62744f42502/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM= github.com/netbirdio/signal-dispatcher/dispatcher v0.0.0-20250805121659-6b4ac470ca45 h1:ujgviVYmx243Ksy7NdSwrdGPSRNE3pb8kEDSpH0QuAQ= @@ -418,6 +423,8 @@ github.com/nicksnyder/go-i18n/v2 v2.5.1/go.mod h1:DrhgsSDZxoAfvVrBVLXoxZn/pN5TXq github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oapi-codegen/runtime v1.1.2 h1:P2+CubHq8fO4Q6fV1tqDBZHCwpVpvPg7oKiYzQgXIyI= +github.com/oapi-codegen/runtime v1.1.2/go.mod h1:SK9X900oXmPWilYR5/WKPzt3Kqxn/uS/+lbpREv+eCg= github.com/okta/okta-sdk-golang/v2 v2.18.0 h1:cfDasMb7CShbZvOrF6n+DnLevWwiHgedWMGJ8M8xKDc= github.com/okta/okta-sdk-golang/v2 v2.18.0/go.mod h1:dz30v3ctAiMb7jpsCngGfQUAEGm1/NsWT92uTbNDQIs= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -524,6 +531,7 @@ github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0= github.com/srwiley/oksvg v0.0.0-20221011165216-be6e8873101c h1:km8GpoQut05eY3GiYWEedbTT0qnSxrCjsVbb7yKY1KE= github.com/srwiley/oksvg v0.0.0-20221011165216-be6e8873101c/go.mod h1:cNQ3dwVJtS5Hmnjxy6AgTPd0Inb3pW05ftPSX7NZO7Q= github.com/srwiley/rasterx v0.0.0-20220730225603-2ab79fcdd4ef h1:Ch6Q+AZUxDBCVqdkI8FSpFyZDtCVBc2VmejdNrm5rRQ= diff --git a/idp/dex/connector.go b/idp/dex/connector.go new file mode 100644 index 000000000..cad682141 --- /dev/null +++ b/idp/dex/connector.go @@ -0,0 +1,356 @@ +// Package dex provides an embedded Dex OIDC identity provider. +package dex + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "strings" + + "github.com/dexidp/dex/storage" +) + +// ConnectorConfig represents the configuration for an identity provider connector +type ConnectorConfig struct { + // ID is the unique identifier for the connector + ID string + // Name is a human-readable name for the connector + Name string + // Type is the connector type (oidc, google, microsoft) + Type string + // Issuer is the OIDC issuer URL (for OIDC-based connectors) + Issuer string + // ClientID is the OAuth2 client ID + ClientID string + // ClientSecret is the OAuth2 client secret + ClientSecret string + // RedirectURI is the OAuth2 redirect URI + RedirectURI string +} + +// CreateConnector creates a new connector in Dex storage. +// It maps the connector config to the appropriate Dex connector type and configuration. +func (p *Provider) CreateConnector(ctx context.Context, cfg *ConnectorConfig) (*ConnectorConfig, error) { + // Fill in the redirect URI if not provided + if cfg.RedirectURI == "" { + cfg.RedirectURI = p.GetRedirectURI() + } + + storageConn, err := p.buildStorageConnector(cfg) + if err != nil { + return nil, fmt.Errorf("failed to build connector: %w", err) + } + + if err := p.storage.CreateConnector(ctx, storageConn); err != nil { + return nil, fmt.Errorf("failed to create connector: %w", err) + } + + p.logger.Info("connector created", "id", cfg.ID, "type", cfg.Type) + return cfg, nil +} + +// GetConnector retrieves a connector by ID from Dex storage. +func (p *Provider) GetConnector(ctx context.Context, id string) (*ConnectorConfig, error) { + conn, err := p.storage.GetConnector(ctx, id) + if err != nil { + if err == storage.ErrNotFound { + return nil, err + } + return nil, fmt.Errorf("failed to get connector: %w", err) + } + + return p.parseStorageConnector(conn) +} + +// ListConnectors returns all connectors from Dex storage (excluding the local connector). +func (p *Provider) ListConnectors(ctx context.Context) ([]*ConnectorConfig, error) { + connectors, err := p.storage.ListConnectors(ctx) + if err != nil { + return nil, fmt.Errorf("failed to list connectors: %w", err) + } + + result := make([]*ConnectorConfig, 0, len(connectors)) + for _, conn := range connectors { + // Skip the local password connector + if conn.ID == "local" && conn.Type == "local" { + continue + } + + cfg, err := p.parseStorageConnector(conn) + if err != nil { + p.logger.Warn("failed to parse connector", "id", conn.ID, "error", err) + continue + } + result = append(result, cfg) + } + + return result, nil +} + +// UpdateConnector updates an existing connector in Dex storage. +// It merges incoming updates with existing values to prevent data loss on partial updates. +func (p *Provider) UpdateConnector(ctx context.Context, cfg *ConnectorConfig) error { + if err := p.storage.UpdateConnector(ctx, cfg.ID, func(old storage.Connector) (storage.Connector, error) { + oldCfg, err := p.parseStorageConnector(old) + if err != nil { + return storage.Connector{}, fmt.Errorf("failed to parse existing connector: %w", err) + } + + mergeConnectorConfig(cfg, oldCfg) + + storageConn, err := p.buildStorageConnector(cfg) + if err != nil { + return storage.Connector{}, fmt.Errorf("failed to build connector: %w", err) + } + return storageConn, nil + }); err != nil { + return fmt.Errorf("failed to update connector: %w", err) + } + + p.logger.Info("connector updated", "id", cfg.ID, "type", cfg.Type) + return nil +} + +// mergeConnectorConfig preserves existing values for empty fields in the update. +func mergeConnectorConfig(cfg, oldCfg *ConnectorConfig) { + if cfg.ClientSecret == "" { + cfg.ClientSecret = oldCfg.ClientSecret + } + if cfg.RedirectURI == "" { + cfg.RedirectURI = oldCfg.RedirectURI + } + if cfg.Issuer == "" && cfg.Type == oldCfg.Type { + cfg.Issuer = oldCfg.Issuer + } + if cfg.ClientID == "" { + cfg.ClientID = oldCfg.ClientID + } + if cfg.Name == "" { + cfg.Name = oldCfg.Name + } +} + +// DeleteConnector removes a connector from Dex storage. +func (p *Provider) DeleteConnector(ctx context.Context, id string) error { + // Prevent deletion of the local connector + if id == "local" { + return fmt.Errorf("cannot delete the local password connector") + } + + if err := p.storage.DeleteConnector(ctx, id); err != nil { + return fmt.Errorf("failed to delete connector: %w", err) + } + + p.logger.Info("connector deleted", "id", id) + return nil +} + +// GetRedirectURI returns the default redirect URI for connectors. +func (p *Provider) GetRedirectURI() string { + if p.config == nil { + return "" + } + issuer := strings.TrimSuffix(p.config.Issuer, "/") + if !strings.HasSuffix(issuer, "/oauth2") { + issuer += "/oauth2" + } + return issuer + "/callback" +} + +// buildStorageConnector creates a storage.Connector from ConnectorConfig. +// It handles the type-specific configuration for each connector type. +func (p *Provider) buildStorageConnector(cfg *ConnectorConfig) (storage.Connector, error) { + redirectURI := p.resolveRedirectURI(cfg.RedirectURI) + + var dexType string + var configData []byte + var err error + + switch cfg.Type { + case "oidc", "zitadel", "entra", "okta", "pocketid", "authentik", "keycloak": + dexType = "oidc" + configData, err = buildOIDCConnectorConfig(cfg, redirectURI) + case "google": + dexType = "google" + configData, err = buildOAuth2ConnectorConfig(cfg, redirectURI) + case "microsoft": + dexType = "microsoft" + configData, err = buildOAuth2ConnectorConfig(cfg, redirectURI) + default: + return storage.Connector{}, fmt.Errorf("unsupported connector type: %s", cfg.Type) + } + if err != nil { + return storage.Connector{}, err + } + + return storage.Connector{ID: cfg.ID, Type: dexType, Name: cfg.Name, Config: configData}, nil +} + +// resolveRedirectURI returns the redirect URI, using a default if not provided +func (p *Provider) resolveRedirectURI(redirectURI string) string { + if redirectURI != "" || p.config == nil { + return redirectURI + } + issuer := strings.TrimSuffix(p.config.Issuer, "/") + if !strings.HasSuffix(issuer, "/oauth2") { + issuer += "/oauth2" + } + return issuer + "/callback" +} + +// buildOIDCConnectorConfig creates config for OIDC-based connectors +func buildOIDCConnectorConfig(cfg *ConnectorConfig, redirectURI string) ([]byte, error) { + oidcConfig := map[string]interface{}{ + "issuer": cfg.Issuer, + "clientID": cfg.ClientID, + "clientSecret": cfg.ClientSecret, + "redirectURI": redirectURI, + "scopes": []string{"openid", "profile", "email"}, + "insecureEnableGroups": true, + //some providers don't return email verified, so we need to skip it if not present (e.g., Entra, Okta, Duo) + "insecureSkipEmailVerified": true, + } + switch cfg.Type { + case "zitadel": + oidcConfig["getUserInfo"] = true + case "entra": + oidcConfig["claimMapping"] = map[string]string{"email": "preferred_username"} + case "okta": + oidcConfig["scopes"] = []string{"openid", "profile", "email", "groups"} + case "pocketid": + oidcConfig["scopes"] = []string{"openid", "profile", "email", "groups"} + } + return encodeConnectorConfig(oidcConfig) +} + +// buildOAuth2ConnectorConfig creates config for OAuth2 connectors (google, microsoft) +func buildOAuth2ConnectorConfig(cfg *ConnectorConfig, redirectURI string) ([]byte, error) { + return encodeConnectorConfig(map[string]interface{}{ + "clientID": cfg.ClientID, + "clientSecret": cfg.ClientSecret, + "redirectURI": redirectURI, + }) +} + +// parseStorageConnector converts a storage.Connector back to ConnectorConfig. +// It infers the original identity provider type from the Dex connector type and ID. +func (p *Provider) parseStorageConnector(conn storage.Connector) (*ConnectorConfig, error) { + cfg := &ConnectorConfig{ + ID: conn.ID, + Name: conn.Name, + } + + if len(conn.Config) == 0 { + cfg.Type = conn.Type + return cfg, nil + } + + var configMap map[string]interface{} + if err := decodeConnectorConfig(conn.Config, &configMap); err != nil { + return nil, fmt.Errorf("failed to parse connector config: %w", err) + } + + // Extract common fields + if v, ok := configMap["clientID"].(string); ok { + cfg.ClientID = v + } + if v, ok := configMap["clientSecret"].(string); ok { + cfg.ClientSecret = v + } + if v, ok := configMap["redirectURI"].(string); ok { + cfg.RedirectURI = v + } + if v, ok := configMap["issuer"].(string); ok { + cfg.Issuer = v + } + + // Infer the original identity provider type from Dex connector type and ID + cfg.Type = inferIdentityProviderType(conn.Type, conn.ID, configMap) + + return cfg, nil +} + +// inferIdentityProviderType determines the original identity provider type +// based on the Dex connector type, connector ID, and configuration. +func inferIdentityProviderType(dexType, connectorID string, _ map[string]interface{}) string { + if dexType != "oidc" { + return dexType + } + return inferOIDCProviderType(connectorID) +} + +// inferOIDCProviderType infers the specific OIDC provider from connector ID +func inferOIDCProviderType(connectorID string) string { + connectorIDLower := strings.ToLower(connectorID) + for _, provider := range []string{"pocketid", "zitadel", "entra", "okta", "authentik", "keycloak"} { + if strings.Contains(connectorIDLower, provider) { + return provider + } + } + return "oidc" +} + +// encodeConnectorConfig serializes connector config to JSON bytes. +func encodeConnectorConfig(config map[string]interface{}) ([]byte, error) { + return json.Marshal(config) +} + +// decodeConnectorConfig deserializes connector config from JSON bytes. +func decodeConnectorConfig(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} + +// ensureLocalConnector creates a local (password) connector if it doesn't exist +func ensureLocalConnector(ctx context.Context, stor storage.Storage) error { + // Check specifically for the local connector + _, err := stor.GetConnector(ctx, "local") + if err == nil { + // Local connector already exists + return nil + } + if !errors.Is(err, storage.ErrNotFound) { + return fmt.Errorf("failed to get local connector: %w", err) + } + + // Create a local connector for password authentication + localConnector := storage.Connector{ + ID: "local", + Type: "local", + Name: "Email", + } + + if err := stor.CreateConnector(ctx, localConnector); err != nil { + return fmt.Errorf("failed to create local connector: %w", err) + } + + return nil +} + +// ensureStaticConnectors creates or updates static connectors in storage +func ensureStaticConnectors(ctx context.Context, stor storage.Storage, connectors []Connector) error { + for _, conn := range connectors { + storConn, err := conn.ToStorageConnector() + if err != nil { + return fmt.Errorf("failed to convert connector %s: %w", conn.ID, err) + } + _, err = stor.GetConnector(ctx, conn.ID) + if err == storage.ErrNotFound { + if err := stor.CreateConnector(ctx, storConn); err != nil { + return fmt.Errorf("failed to create connector %s: %w", conn.ID, err) + } + continue + } + if err != nil { + return fmt.Errorf("failed to get connector %s: %w", conn.ID, err) + } + if err := stor.UpdateConnector(ctx, conn.ID, func(old storage.Connector) (storage.Connector, error) { + old.Name = storConn.Name + old.Config = storConn.Config + return old, nil + }); err != nil { + return fmt.Errorf("failed to update connector %s: %w", conn.ID, err) + } + } + return nil +} diff --git a/idp/dex/provider.go b/idp/dex/provider.go index 6a4fe7873..6c608dbf5 100644 --- a/idp/dex/provider.go +++ b/idp/dex/provider.go @@ -4,7 +4,6 @@ package dex import ( "context" "encoding/base64" - "encoding/json" "errors" "fmt" "log/slog" @@ -245,34 +244,6 @@ func ensureStaticClients(ctx context.Context, stor storage.Storage, clients []st return nil } -// ensureStaticConnectors creates or updates static connectors in storage -func ensureStaticConnectors(ctx context.Context, stor storage.Storage, connectors []Connector) error { - for _, conn := range connectors { - storConn, err := conn.ToStorageConnector() - if err != nil { - return fmt.Errorf("failed to convert connector %s: %w", conn.ID, err) - } - _, err = stor.GetConnector(ctx, conn.ID) - if errors.Is(err, storage.ErrNotFound) { - if err := stor.CreateConnector(ctx, storConn); err != nil { - return fmt.Errorf("failed to create connector %s: %w", conn.ID, err) - } - continue - } - if err != nil { - return fmt.Errorf("failed to get connector %s: %w", conn.ID, err) - } - if err := stor.UpdateConnector(ctx, conn.ID, func(old storage.Connector) (storage.Connector, error) { - old.Name = storConn.Name - old.Config = storConn.Config - return old, nil - }); err != nil { - return fmt.Errorf("failed to update connector %s: %w", conn.ID, err) - } - } - return nil -} - // buildDexConfig creates a server.Config with defaults applied func buildDexConfig(yamlConfig *YAMLConfig, stor storage.Storage, logger *slog.Logger) server.Config { cfg := yamlConfig.ToServerConfig(stor, logger) @@ -613,294 +584,37 @@ func (p *Provider) ListUsers(ctx context.Context) ([]storage.Password, error) { return p.storage.ListPasswords(ctx) } -// ensureLocalConnector creates a local (password) connector if none exists -func ensureLocalConnector(ctx context.Context, stor storage.Storage) error { - connectors, err := stor.ListConnectors(ctx) +// UpdateUserPassword updates the password for a user identified by userID. +// The userID can be either an encoded Dex ID (base64 protobuf) or a raw UUID. +// It verifies the current password before updating. +func (p *Provider) UpdateUserPassword(ctx context.Context, userID string, oldPassword, newPassword string) error { + // Get the user by ID to find their email + user, err := p.GetUserByID(ctx, userID) if err != nil { - return fmt.Errorf("failed to list connectors: %w", err) + return fmt.Errorf("failed to get user: %w", err) } - // If any connector exists, we're good - if len(connectors) > 0 { - return nil + // Verify old password + if err := bcrypt.CompareHashAndPassword(user.Hash, []byte(oldPassword)); err != nil { + return fmt.Errorf("current password is incorrect") } - // Create a local connector for password authentication - localConnector := storage.Connector{ - ID: "local", - Type: "local", - Name: "Email", - } - - if err := stor.CreateConnector(ctx, localConnector); err != nil { - return fmt.Errorf("failed to create local connector: %w", err) - } - - return nil -} - -// ConnectorConfig represents the configuration for an identity provider connector -type ConnectorConfig struct { - // ID is the unique identifier for the connector - ID string - // Name is a human-readable name for the connector - Name string - // Type is the connector type (oidc, google, microsoft) - Type string - // Issuer is the OIDC issuer URL (for OIDC-based connectors) - Issuer string - // ClientID is the OAuth2 client ID - ClientID string - // ClientSecret is the OAuth2 client secret - ClientSecret string - // RedirectURI is the OAuth2 redirect URI - RedirectURI string -} - -// CreateConnector creates a new connector in Dex storage. -// It maps the connector config to the appropriate Dex connector type and configuration. -func (p *Provider) CreateConnector(ctx context.Context, cfg *ConnectorConfig) (*ConnectorConfig, error) { - // Fill in the redirect URI if not provided - if cfg.RedirectURI == "" { - cfg.RedirectURI = p.GetRedirectURI() - } - - storageConn, err := p.buildStorageConnector(cfg) + // Hash the new password + newHash, err := bcrypt.GenerateFromPassword([]byte(newPassword), bcrypt.DefaultCost) if err != nil { - return nil, fmt.Errorf("failed to build connector: %w", err) + return fmt.Errorf("failed to hash new password: %w", err) } - if err := p.storage.CreateConnector(ctx, storageConn); err != nil { - return nil, fmt.Errorf("failed to create connector: %w", err) - } - - p.logger.Info("connector created", "id", cfg.ID, "type", cfg.Type) - return cfg, nil -} - -// GetConnector retrieves a connector by ID from Dex storage. -func (p *Provider) GetConnector(ctx context.Context, id string) (*ConnectorConfig, error) { - conn, err := p.storage.GetConnector(ctx, id) - if err != nil { - if err == storage.ErrNotFound { - return nil, err - } - return nil, fmt.Errorf("failed to get connector: %w", err) - } - - return p.parseStorageConnector(conn) -} - -// ListConnectors returns all connectors from Dex storage (excluding the local connector). -func (p *Provider) ListConnectors(ctx context.Context) ([]*ConnectorConfig, error) { - connectors, err := p.storage.ListConnectors(ctx) - if err != nil { - return nil, fmt.Errorf("failed to list connectors: %w", err) - } - - result := make([]*ConnectorConfig, 0, len(connectors)) - for _, conn := range connectors { - // Skip the local password connector - if conn.ID == "local" && conn.Type == "local" { - continue - } - - cfg, err := p.parseStorageConnector(conn) - if err != nil { - p.logger.Warn("failed to parse connector", "id", conn.ID, "error", err) - continue - } - result = append(result, cfg) - } - - return result, nil -} - -// UpdateConnector updates an existing connector in Dex storage. -func (p *Provider) UpdateConnector(ctx context.Context, cfg *ConnectorConfig) error { - storageConn, err := p.buildStorageConnector(cfg) - if err != nil { - return fmt.Errorf("failed to build connector: %w", err) - } - - if err := p.storage.UpdateConnector(ctx, cfg.ID, func(old storage.Connector) (storage.Connector, error) { - return storageConn, nil - }); err != nil { - return fmt.Errorf("failed to update connector: %w", err) - } - - p.logger.Info("connector updated", "id", cfg.ID, "type", cfg.Type) - return nil -} - -// DeleteConnector removes a connector from Dex storage. -func (p *Provider) DeleteConnector(ctx context.Context, id string) error { - // Prevent deletion of the local connector - if id == "local" { - return fmt.Errorf("cannot delete the local password connector") - } - - if err := p.storage.DeleteConnector(ctx, id); err != nil { - return fmt.Errorf("failed to delete connector: %w", err) - } - - p.logger.Info("connector deleted", "id", id) - return nil -} - -// buildStorageConnector creates a storage.Connector from ConnectorConfig. -// It handles the type-specific configuration for each connector type. -func (p *Provider) buildStorageConnector(cfg *ConnectorConfig) (storage.Connector, error) { - redirectURI := p.resolveRedirectURI(cfg.RedirectURI) - - var dexType string - var configData []byte - var err error - - switch cfg.Type { - case "oidc", "zitadel", "entra", "okta", "pocketid", "authentik", "keycloak": - dexType = "oidc" - configData, err = buildOIDCConnectorConfig(cfg, redirectURI) - case "google": - dexType = "google" - configData, err = buildOAuth2ConnectorConfig(cfg, redirectURI) - case "microsoft": - dexType = "microsoft" - configData, err = buildOAuth2ConnectorConfig(cfg, redirectURI) - default: - return storage.Connector{}, fmt.Errorf("unsupported connector type: %s", cfg.Type) - } - if err != nil { - return storage.Connector{}, err - } - - return storage.Connector{ID: cfg.ID, Type: dexType, Name: cfg.Name, Config: configData}, nil -} - -// resolveRedirectURI returns the redirect URI, using a default if not provided -func (p *Provider) resolveRedirectURI(redirectURI string) string { - if redirectURI != "" || p.config == nil { - return redirectURI - } - issuer := strings.TrimSuffix(p.config.Issuer, "/") - if !strings.HasSuffix(issuer, "/oauth2") { - issuer += "/oauth2" - } - return issuer + "/callback" -} - -// buildOIDCConnectorConfig creates config for OIDC-based connectors -func buildOIDCConnectorConfig(cfg *ConnectorConfig, redirectURI string) ([]byte, error) { - oidcConfig := map[string]interface{}{ - "issuer": cfg.Issuer, - "clientID": cfg.ClientID, - "clientSecret": cfg.ClientSecret, - "redirectURI": redirectURI, - "scopes": []string{"openid", "profile", "email"}, - "insecureEnableGroups": true, - } - switch cfg.Type { - case "zitadel": - oidcConfig["getUserInfo"] = true - case "entra": - oidcConfig["insecureSkipEmailVerified"] = true - oidcConfig["claimMapping"] = map[string]string{"email": "preferred_username"} - case "okta": - oidcConfig["insecureSkipEmailVerified"] = true - oidcConfig["scopes"] = []string{"openid", "profile", "email", "groups"} - case "pocketid": - oidcConfig["scopes"] = []string{"openid", "profile", "email", "groups"} - } - return encodeConnectorConfig(oidcConfig) -} - -// buildOAuth2ConnectorConfig creates config for OAuth2 connectors (google, microsoft) -func buildOAuth2ConnectorConfig(cfg *ConnectorConfig, redirectURI string) ([]byte, error) { - return encodeConnectorConfig(map[string]interface{}{ - "clientID": cfg.ClientID, - "clientSecret": cfg.ClientSecret, - "redirectURI": redirectURI, + // Update the password in storage + err = p.storage.UpdatePassword(ctx, user.Email, func(old storage.Password) (storage.Password, error) { + old.Hash = newHash + return old, nil }) -} - -// parseStorageConnector converts a storage.Connector back to ConnectorConfig. -// It infers the original identity provider type from the Dex connector type and ID. -func (p *Provider) parseStorageConnector(conn storage.Connector) (*ConnectorConfig, error) { - cfg := &ConnectorConfig{ - ID: conn.ID, - Name: conn.Name, + if err != nil { + return fmt.Errorf("failed to update password: %w", err) } - if len(conn.Config) == 0 { - cfg.Type = conn.Type - return cfg, nil - } - - var configMap map[string]interface{} - if err := decodeConnectorConfig(conn.Config, &configMap); err != nil { - return nil, fmt.Errorf("failed to parse connector config: %w", err) - } - - // Extract common fields - if v, ok := configMap["clientID"].(string); ok { - cfg.ClientID = v - } - if v, ok := configMap["clientSecret"].(string); ok { - cfg.ClientSecret = v - } - if v, ok := configMap["redirectURI"].(string); ok { - cfg.RedirectURI = v - } - if v, ok := configMap["issuer"].(string); ok { - cfg.Issuer = v - } - - // Infer the original identity provider type from Dex connector type and ID - cfg.Type = inferIdentityProviderType(conn.Type, conn.ID, configMap) - - return cfg, nil -} - -// inferIdentityProviderType determines the original identity provider type -// based on the Dex connector type, connector ID, and configuration. -func inferIdentityProviderType(dexType, connectorID string, _ map[string]interface{}) string { - if dexType != "oidc" { - return dexType - } - return inferOIDCProviderType(connectorID) -} - -// inferOIDCProviderType infers the specific OIDC provider from connector ID -func inferOIDCProviderType(connectorID string) string { - connectorIDLower := strings.ToLower(connectorID) - for _, provider := range []string{"pocketid", "zitadel", "entra", "okta", "authentik", "keycloak"} { - if strings.Contains(connectorIDLower, provider) { - return provider - } - } - return "oidc" -} - -// encodeConnectorConfig serializes connector config to JSON bytes. -func encodeConnectorConfig(config map[string]interface{}) ([]byte, error) { - return json.Marshal(config) -} - -// decodeConnectorConfig deserializes connector config from JSON bytes. -func decodeConnectorConfig(data []byte, v interface{}) error { - return json.Unmarshal(data, v) -} - -// GetRedirectURI returns the default redirect URI for connectors. -func (p *Provider) GetRedirectURI() string { - if p.config == nil { - return "" - } - issuer := strings.TrimSuffix(p.config.Issuer, "/") - if !strings.HasSuffix(issuer, "/oauth2") { - issuer += "/oauth2" - } - return issuer + "/callback" + return nil } // GetIssuer returns the OIDC issuer URL. diff --git a/infrastructure_files/getting-started.sh b/infrastructure_files/getting-started.sh index 5a9488fad..25599997c 100755 --- a/infrastructure_files/getting-started.sh +++ b/infrastructure_files/getting-started.sh @@ -9,6 +9,16 @@ set -e # Sed pattern to strip base64 padding characters SED_STRIP_PADDING='s/=//g' +# Constants for repeated string literals +readonly MSG_STARTING_SERVICES="\nStarting NetBird services\n" +readonly MSG_DONE="\nDone!\n" +readonly MSG_NEXT_STEPS="Next steps:" +readonly MSG_SEPARATOR="==========================================" + +############################################ +# Utility Functions +############################################ + check_docker_compose() { if command -v docker-compose &> /dev/null then @@ -72,13 +82,103 @@ read_nb_domain() { return 0 } -get_turn_external_ip() { - TURN_EXTERNAL_IP_CONFIG="#external-ip=" - IP=$(curl -s -4 https://jsonip.com | jq -r '.ip') - if [[ "x-$IP" != "x-" ]]; then - TURN_EXTERNAL_IP_CONFIG="external-ip=$IP" +read_reverse_proxy_type() { + echo "" > /dev/stderr + echo "Which reverse proxy will you use?" > /dev/stderr + echo " [0] Built-in Caddy (recommended - automatic TLS)" > /dev/stderr + echo " [1] Traefik (labels added to containers)" > /dev/stderr + echo " [2] Nginx (generates config template)" > /dev/stderr + echo " [3] Nginx Proxy Manager (generates config + instructions)" > /dev/stderr + echo " [4] External Caddy (generates Caddyfile snippet)" > /dev/stderr + echo " [5] Other/Manual (displays setup documentation)" > /dev/stderr + echo "" > /dev/stderr + echo -n "Enter choice [0-5] (default: 0): " > /dev/stderr + read -r CHOICE < /dev/tty + + if [[ -z "$CHOICE" ]]; then + CHOICE="0" fi - echo "$TURN_EXTERNAL_IP_CONFIG" + + if [[ ! "$CHOICE" =~ ^[0-5]$ ]]; then + echo "Invalid choice. Please enter a number between 0 and 5." > /dev/stderr + read_reverse_proxy_type + return + fi + + echo "$CHOICE" + return 0 +} + +read_traefik_network() { + echo "" > /dev/stderr + echo "If you have an existing Traefik instance, enter its external network name." > /dev/stderr + echo -n "External network (leave empty to create 'netbird' network): " > /dev/stderr + read -r NETWORK < /dev/tty + echo "$NETWORK" + return 0 +} + +read_traefik_entrypoint() { + echo "" > /dev/stderr + echo "Enter the name of your Traefik HTTPS entrypoint." > /dev/stderr + echo -n "HTTPS entrypoint name (default: websecure): " > /dev/stderr + read -r ENTRYPOINT < /dev/tty + if [[ -z "$ENTRYPOINT" ]]; then + ENTRYPOINT="websecure" + fi + echo "$ENTRYPOINT" + return 0 +} + +read_traefik_certresolver() { + echo "" > /dev/stderr + echo "Enter the name of your Traefik certificate resolver (for automatic TLS)." > /dev/stderr + echo "Leave empty if you handle TLS termination elsewhere or use a wildcard cert." > /dev/stderr + echo -n "Certificate resolver name (e.g., letsencrypt): " > /dev/stderr + read -r RESOLVER < /dev/tty + echo "$RESOLVER" + return 0 +} + +read_port_binding_preference() { + echo "" > /dev/stderr + echo "Should container ports be bound to localhost only (127.0.0.1)?" > /dev/stderr + echo "Choose 'yes' if your reverse proxy runs on the same host (more secure)." > /dev/stderr + echo -n "Bind to localhost only? [Y/n]: " > /dev/stderr + read -r CHOICE < /dev/tty + + if [[ "$CHOICE" =~ ^[Nn]$ ]]; then + echo "false" + else + echo "true" + fi + return 0 +} + +read_proxy_docker_network() { + local proxy_name="$1" + echo "" > /dev/stderr + echo "Is ${proxy_name} running in Docker?" > /dev/stderr + echo "If yes, enter the Docker network ${proxy_name} is on (NetBird will join it)." > /dev/stderr + echo -n "Docker network (leave empty if not in Docker): " > /dev/stderr + read -r NETWORK < /dev/tty + echo "$NETWORK" + return 0 +} + +get_bind_address() { + if [[ "$BIND_LOCALHOST_ONLY" == "true" ]]; then + echo "127.0.0.1" + else + echo "0.0.0.0" + fi + return 0 +} + +get_upstream_host() { + # Always return 127.0.0.1 for health checks and upstream targets + # Cannot use 0.0.0.0 as a connection target + echo "127.0.0.1" return 0 } @@ -106,20 +206,67 @@ wait_management() { return 0 } -init_environment() { +wait_management_direct() { + set +e + local upstream_host=$(get_upstream_host) + echo -n "Waiting for Management server to become ready" + counter=1 + while true; do + # Check the embedded IdP endpoint directly (no reverse proxy) + if curl -sk -f -o /dev/null "http://${upstream_host}:${MANAGEMENT_HOST_PORT}/oauth2/.well-known/openid-configuration" 2>/dev/null; then + break + fi + if [[ $counter -eq 60 ]]; then + echo "" + echo "Taking too long. Checking logs..." + $DOCKER_COMPOSE_COMMAND logs --tail=20 management + fi + echo -n " ." + sleep 2 + counter=$((counter + 1)) + done + echo " done" + set -e + return 0 +} + +############################################ +# Initialization and Configuration +############################################ + +initialize_default_values() { CADDY_SECURE_DOMAIN="" NETBIRD_PORT=80 NETBIRD_HTTP_PROTOCOL="http" NETBIRD_RELAY_PROTO="rel" - TURN_USER="self" - TURN_PASSWORD=$(openssl rand -base64 32 | sed "$SED_STRIP_PADDING") NETBIRD_RELAY_AUTH_SECRET=$(openssl rand -base64 32 | sed "$SED_STRIP_PADDING") # Note: DataStoreEncryptionKey must keep base64 padding (=) for Go's base64.StdEncoding DATASTORE_ENCRYPTION_KEY=$(openssl rand -base64 32) - TURN_MIN_PORT=49152 - TURN_MAX_PORT=65535 - TURN_EXTERNAL_IP_CONFIG=$(get_turn_external_ip) + NETBIRD_STUN_PORT=3478 + # Docker images + CADDY_IMAGE="caddy" + DASHBOARD_IMAGE="netbirdio/dashboard:latest" + SIGNAL_IMAGE="netbirdio/signal:latest" + RELAY_IMAGE="netbirdio/relay:latest" + MANAGEMENT_IMAGE="netbirdio/management:latest" + + # Reverse proxy configuration + REVERSE_PROXY_TYPE="0" + TRAEFIK_EXTERNAL_NETWORK="" + TRAEFIK_ENTRYPOINT="websecure" + TRAEFIK_CERTRESOLVER="" + DASHBOARD_HOST_PORT="8080" + MANAGEMENT_HOST_PORT="8081" + SIGNAL_HOST_PORT="8083" + SIGNAL_GRPC_PORT="10000" + RELAY_HOST_PORT="8084" + BIND_LOCALHOST_ONLY="true" + EXTERNAL_PROXY_NETWORK="" + return 0 +} + +configure_domain() { if ! check_nb_domain "$NETBIRD_DOMAIN"; then NETBIRD_DOMAIN=$(read_nb_domain) fi @@ -132,41 +279,168 @@ init_environment() { NETBIRD_HTTP_PROTOCOL="https" NETBIRD_RELAY_PROTO="rels" fi + return 0 +} - check_jq +configure_reverse_proxy() { + # Prompt for reverse proxy type + REVERSE_PROXY_TYPE=$(read_reverse_proxy_type) - DOCKER_COMPOSE_COMMAND=$(check_docker_compose) + # Handle Traefik-specific prompts + if [[ "$REVERSE_PROXY_TYPE" == "1" ]]; then + TRAEFIK_EXTERNAL_NETWORK=$(read_traefik_network) + TRAEFIK_ENTRYPOINT=$(read_traefik_entrypoint) + TRAEFIK_CERTRESOLVER=$(read_traefik_certresolver) + fi + # Handle port binding for external proxy options (2-5) + if [[ "$REVERSE_PROXY_TYPE" -ge 2 ]]; then + BIND_LOCALHOST_ONLY=$(read_port_binding_preference) + fi + + # Handle Docker network prompts for external proxies (options 2-4) + case "$REVERSE_PROXY_TYPE" in + 2) EXTERNAL_PROXY_NETWORK=$(read_proxy_docker_network "Nginx") ;; + 3) EXTERNAL_PROXY_NETWORK=$(read_proxy_docker_network "Nginx Proxy Manager") ;; + 4) EXTERNAL_PROXY_NETWORK=$(read_proxy_docker_network "Caddy") ;; + *) ;; # No network prompt for other options + esac + return 0 +} + +check_existing_installation() { if [[ -f management.json ]]; then echo "Generated files already exist, if you want to reinitialize the environment, please remove them first." echo "You can use the following commands:" echo " $DOCKER_COMPOSE_COMMAND down --volumes # to remove all containers and volumes" - echo " rm -f docker-compose.yml Caddyfile dashboard.env turnserver.conf management.json relay.env" + echo " rm -f docker-compose.yml Caddyfile dashboard.env management.json relay.env nginx-netbird.conf caddyfile-netbird.txt npm-advanced-config.txt" echo "Be aware that this will remove all data from the database, and you will have to reconfigure the dashboard." exit 1 fi - - echo Rendering initial files... - render_docker_compose > docker-compose.yml - render_caddyfile > Caddyfile - render_dashboard_env > dashboard.env - render_management_json > management.json - render_turn_server_conf > turnserver.conf - render_relay_env > relay.env - - echo -e "\nStarting NetBird services\n" - $DOCKER_COMPOSE_COMMAND up -d - - # Wait for management (and embedded IdP) to be ready - sleep 3 - wait_management - - echo -e "\nDone!\n" - echo "You can access the NetBird dashboard at $NETBIRD_HTTP_PROTOCOL://$NETBIRD_DOMAIN" - echo "Follow the onboarding steps to set up your NetBird instance." return 0 } +generate_configuration_files() { + echo Rendering initial files... + + # Render docker-compose and proxy config based on selection + case "$REVERSE_PROXY_TYPE" in + 0) + render_docker_compose > docker-compose.yml + render_caddyfile > Caddyfile + ;; + 1) + render_docker_compose_traefik > docker-compose.yml + ;; + 2) + render_docker_compose_exposed_ports > docker-compose.yml + render_nginx_conf > nginx-netbird.conf + ;; + 3) + render_docker_compose_exposed_ports > docker-compose.yml + render_npm_advanced_config > npm-advanced-config.txt + ;; + 4) + render_docker_compose_exposed_ports > docker-compose.yml + render_external_caddyfile > caddyfile-netbird.txt + ;; + 5) + render_docker_compose_exposed_ports > docker-compose.yml + ;; + *) + echo "Invalid reverse proxy type: $REVERSE_PROXY_TYPE" > /dev/stderr + exit 1 + ;; + esac + + # Common files for all configurations + render_dashboard_env > dashboard.env + render_management_json > management.json + render_relay_env > relay.env + return 0 +} + +start_services_and_show_instructions() { + # For built-in Caddy and Traefik, start containers immediately + # For NPM, start containers first (NPM needs services running to create proxy) + # For other external proxies, show instructions first and wait for user confirmation + if [[ "$REVERSE_PROXY_TYPE" == "0" ]]; then + # Built-in Caddy - handles everything automatically + echo -e "$MSG_STARTING_SERVICES" + $DOCKER_COMPOSE_COMMAND up -d + + sleep 3 + wait_management + + echo -e "$MSG_DONE" + print_post_setup_instructions + elif [[ "$REVERSE_PROXY_TYPE" == "1" ]]; then + # Traefik - start containers first, then show instructions + # Traefik discovers services via Docker labels, so containers must be running + echo -e "$MSG_STARTING_SERVICES" + $DOCKER_COMPOSE_COMMAND up -d + + sleep 3 + wait_management_direct + + echo -e "$MSG_DONE" + print_post_setup_instructions + echo "" + echo "NetBird containers are running. Once Traefik is connected, access the dashboard at:" + echo " $NETBIRD_HTTP_PROTOCOL://$NETBIRD_DOMAIN" + elif [[ "$REVERSE_PROXY_TYPE" == "3" ]]; then + # NPM - start containers first, then show instructions + # NPM requires backend services to be running before creating proxy hosts + echo -e "$MSG_STARTING_SERVICES" + $DOCKER_COMPOSE_COMMAND up -d + + sleep 3 + wait_management_direct + + echo -e "$MSG_DONE" + print_post_setup_instructions + echo "" + echo "NetBird containers are running. Configure NPM as shown above, then access:" + echo " $NETBIRD_HTTP_PROTOCOL://$NETBIRD_DOMAIN" + else + # External proxies (nginx, external Caddy, other) - need manual config first + print_post_setup_instructions + + echo "" + echo -n "Press Enter when your reverse proxy is configured (or Ctrl+C to exit)... " + read -r < /dev/tty + + echo -e "$MSG_STARTING_SERVICES" + $DOCKER_COMPOSE_COMMAND up -d + + sleep 3 + wait_management_direct + + echo -e "$MSG_DONE" + echo "NetBird is now running. Access the dashboard at:" + echo " $NETBIRD_HTTP_PROTOCOL://$NETBIRD_DOMAIN" + fi + return 0 +} + +init_environment() { + initialize_default_values + configure_domain + configure_reverse_proxy + + check_jq + DOCKER_COMPOSE_COMMAND=$(check_docker_compose) + + check_existing_installation + generate_configuration_files + start_services_and_show_instructions + return 0 +} + +############################################ +# Configuration File Renderers +############################################ + render_caddyfile() { cat < ${upstream_host}:${RELAY_HOST_PORT}" + echo " (HTTP with WebSocket upgrade)" + echo "" + echo " /ws-proxy/signal* -> ${upstream_host}:${SIGNAL_HOST_PORT}" + echo " (HTTP with WebSocket upgrade)" + echo "" + echo " /signalexchange.SignalExchange/* -> ${upstream_host}:${SIGNAL_GRPC_PORT}" + echo " (gRPC/h2c - plaintext HTTP/2)" + echo "" + echo " /api/* -> ${upstream_host}:${MANAGEMENT_HOST_PORT}" + echo " (HTTP)" + echo "" + echo " /ws-proxy/management* -> ${upstream_host}:${MANAGEMENT_HOST_PORT}" + echo " (HTTP with WebSocket upgrade)" + echo "" + echo " /management.ManagementService/* -> ${upstream_host}:${MANAGEMENT_HOST_PORT}" + echo " (gRPC/h2c - plaintext HTTP/2)" + echo "" + echo " /oauth2/* -> ${upstream_host}:${MANAGEMENT_HOST_PORT}" + echo " (HTTP - embedded IdP)" + echo "" + echo " /* -> ${upstream_host}:${DASHBOARD_HOST_PORT}" + echo " (HTTP - catch-all for dashboard)" + echo "" + echo "IMPORTANT: gRPC routes require HTTP/2 (h2c) upstream support." + echo "Long-running connections need extended timeouts (recommend 1 day)." + return 0 +} + +print_post_setup_instructions() { + case "$REVERSE_PROXY_TYPE" in + 0) + print_caddy_instructions + ;; + 1) + print_traefik_instructions + ;; + 2) + print_nginx_instructions + ;; + 3) + print_npm_instructions + ;; + 4) + print_external_caddy_instructions + ;; + 5) + print_manual_instructions + ;; + *) + echo "Unknown reverse proxy type: $REVERSE_PROXY_TYPE" > /dev/stderr + ;; + esac + return 0 +} + init_environment diff --git a/management/internals/controllers/network_map/controller/controller.go b/management/internals/controllers/network_map/controller/controller.go index d46737c26..5ae64e9f1 100644 --- a/management/internals/controllers/network_map/controller/controller.go +++ b/management/internals/controllers/network_map/controller/controller.go @@ -856,3 +856,7 @@ func (c *Controller) GetNetworkMap(ctx context.Context, peerID string) (*types.N func (c *Controller) DisconnectPeers(ctx context.Context, accountId string, peerIDs []string) { c.peersUpdateManager.CloseChannels(ctx, peerIDs) } + +func (c *Controller) TrackEphemeralPeer(ctx context.Context, peer *nbpeer.Peer) { + c.EphemeralPeersManager.OnPeerDisconnected(ctx, peer) +} diff --git a/management/internals/controllers/network_map/interface.go b/management/internals/controllers/network_map/interface.go index b1de7d017..64caac861 100644 --- a/management/internals/controllers/network_map/interface.go +++ b/management/internals/controllers/network_map/interface.go @@ -36,4 +36,6 @@ type Controller interface { DisconnectPeers(ctx context.Context, accountId string, peerIDs []string) OnPeerConnected(ctx context.Context, accountID string, peerID string) (chan *UpdateMessage, error) OnPeerDisconnected(ctx context.Context, accountID string, peerID string) + + TrackEphemeralPeer(ctx context.Context, peer *nbpeer.Peer) } diff --git a/management/internals/controllers/network_map/interface_mock.go b/management/internals/controllers/network_map/interface_mock.go index 5a98eefa8..4e86d2973 100644 --- a/management/internals/controllers/network_map/interface_mock.go +++ b/management/internals/controllers/network_map/interface_mock.go @@ -1,9 +1,9 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: ./interface.go +// Source: management/internals/controllers/network_map/interface.go // // Generated by this command: // -// mockgen -package network_map -destination=interface_mock.go -source=./interface.go -build_flags=-mod=mod +// mockgen -package network_map -destination=management/internals/controllers/network_map/interface_mock.go -source=management/internals/controllers/network_map/interface.go -build_flags=-mod=mod // // Package network_map is a generated GoMock package. @@ -211,6 +211,18 @@ func (mr *MockControllerMockRecorder) StartWarmup(arg0 any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartWarmup", reflect.TypeOf((*MockController)(nil).StartWarmup), arg0) } +// TrackEphemeralPeer mocks base method. +func (m *MockController) TrackEphemeralPeer(ctx context.Context, arg1 *peer.Peer) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "TrackEphemeralPeer", ctx, arg1) +} + +// TrackEphemeralPeer indicates an expected call of TrackEphemeralPeer. +func (mr *MockControllerMockRecorder) TrackEphemeralPeer(ctx, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TrackEphemeralPeer", reflect.TypeOf((*MockController)(nil).TrackEphemeralPeer), ctx, arg1) +} + // UpdateAccountPeer mocks base method. func (m *MockController) UpdateAccountPeer(ctx context.Context, accountId, peerId string) error { m.ctrl.T.Helper() diff --git a/management/internals/modules/peers/manager.go b/management/internals/modules/peers/manager.go index 4935c608e..1551689b4 100644 --- a/management/internals/modules/peers/manager.go +++ b/management/internals/modules/peers/manager.go @@ -31,6 +31,7 @@ type Manager interface { SetNetworkMapController(networkMapController network_map.Controller) SetIntegratedPeerValidator(integratedPeerValidator integrated_validator.IntegratedValidator) SetAccountManager(accountManager account.Manager) + GetPeerID(ctx context.Context, peerKey string) (string, error) } type managerImpl struct { @@ -167,3 +168,7 @@ func (m *managerImpl) DeletePeers(ctx context.Context, accountID string, peerIDs return nil } + +func (m *managerImpl) GetPeerID(ctx context.Context, peerKey string) (string, error) { + return m.store.GetPeerIDByKey(ctx, store.LockingStrengthNone, peerKey) +} diff --git a/management/internals/modules/peers/manager_mock.go b/management/internals/modules/peers/manager_mock.go index 2e3651e88..6feedca2e 100644 --- a/management/internals/modules/peers/manager_mock.go +++ b/management/internals/modules/peers/manager_mock.go @@ -97,6 +97,21 @@ func (mr *MockManagerMockRecorder) GetPeerAccountID(ctx, peerID interface{}) *go return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPeerAccountID", reflect.TypeOf((*MockManager)(nil).GetPeerAccountID), ctx, peerID) } +// GetPeerID mocks base method. +func (m *MockManager) GetPeerID(ctx context.Context, peerKey string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPeerID", ctx, peerKey) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPeerID indicates an expected call of GetPeerID. +func (mr *MockManagerMockRecorder) GetPeerID(ctx, peerKey interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPeerID", reflect.TypeOf((*MockManager)(nil).GetPeerID), ctx, peerKey) +} + // GetPeersByGroupIDs mocks base method. func (m *MockManager) GetPeersByGroupIDs(ctx context.Context, accountID string, groupsIDs []string) ([]*peer.Peer, error) { m.ctrl.T.Helper() diff --git a/management/internals/server/boot.go b/management/internals/server/boot.go index 27ff18c47..f42c40714 100644 --- a/management/internals/server/boot.go +++ b/management/internals/server/boot.go @@ -144,7 +144,7 @@ func (s *BaseServer) GRPCServer() *grpc.Server { } gRPCAPIHandler := grpc.NewServer(gRPCOpts...) - srv, err := nbgrpc.NewServer(s.Config, s.AccountManager(), s.SettingsManager(), s.SecretsManager(), s.Metrics(), s.AuthManager(), s.IntegratedValidator(), s.NetworkMapController(), s.OAuthConfigProvider()) + srv, err := nbgrpc.NewServer(s.Config, s.AccountManager(), s.SettingsManager(), s.JobManager(), s.SecretsManager(), s.Metrics(), s.AuthManager(), s.IntegratedValidator(), s.NetworkMapController(), s.OAuthConfigProvider()) if err != nil { log.Fatalf("failed to create management server: %v", err) } diff --git a/management/internals/server/controllers.go b/management/internals/server/controllers.go index 9f35d436f..4ea86900a 100644 --- a/management/internals/server/controllers.go +++ b/management/internals/server/controllers.go @@ -6,6 +6,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/netbirdio/management-integrations/integrations" + "github.com/netbirdio/netbird/management/internals/controllers/network_map" nmapcontroller "github.com/netbirdio/netbird/management/internals/controllers/network_map/controller" "github.com/netbirdio/netbird/management/internals/controllers/network_map/update_channel" @@ -16,6 +17,7 @@ import ( "github.com/netbirdio/netbird/management/server/auth" "github.com/netbirdio/netbird/management/server/integrations/integrated_validator" "github.com/netbirdio/netbird/management/server/integrations/port_forwarding" + "github.com/netbirdio/netbird/management/server/job" ) func (s *BaseServer) PeersUpdateManager() network_map.PeersUpdateManager { @@ -24,6 +26,12 @@ func (s *BaseServer) PeersUpdateManager() network_map.PeersUpdateManager { }) } +func (s *BaseServer) JobManager() *job.Manager { + return Create(s, func() *job.Manager { + return job.NewJobManager(s.Metrics(), s.Store(), s.PeersManager()) + }) +} + func (s *BaseServer) IntegratedValidator() integrated_validator.IntegratedValidator { return Create(s, func() integrated_validator.IntegratedValidator { integratedPeerValidator, err := integrations.NewIntegratedValidator( diff --git a/management/internals/server/modules.go b/management/internals/server/modules.go index adcfb1af0..bd6a69cfa 100644 --- a/management/internals/server/modules.go +++ b/management/internals/server/modules.go @@ -89,7 +89,7 @@ func (s *BaseServer) PeersManager() peers.Manager { func (s *BaseServer) AccountManager() account.Manager { return Create(s, func() account.Manager { - accountManager, err := server.BuildManager(context.Background(), s.Config, s.Store(), s.NetworkMapController(), s.IdpManager(), s.mgmtSingleAccModeDomain, s.EventStore(), s.GeoLocationManager(), s.userDeleteFromIDPEnabled, s.IntegratedValidator(), s.Metrics(), s.ProxyController(), s.SettingsManager(), s.PermissionsManager(), s.Config.DisableDefaultPolicy) + accountManager, err := server.BuildManager(context.Background(), s.Config, s.Store(), s.NetworkMapController(), s.JobManager(), s.IdpManager(), s.mgmtSingleAccModeDomain, s.EventStore(), s.GeoLocationManager(), s.userDeleteFromIDPEnabled, s.IntegratedValidator(), s.Metrics(), s.ProxyController(), s.SettingsManager(), s.PermissionsManager(), s.Config.DisableDefaultPolicy) if err != nil { log.Fatalf("failed to create account manager: %v", err) } diff --git a/management/internals/shared/grpc/conversion_test.go b/management/internals/shared/grpc/conversion_test.go index 95ad05eec..1e75caf95 100644 --- a/management/internals/shared/grpc/conversion_test.go +++ b/management/internals/shared/grpc/conversion_test.go @@ -195,6 +195,7 @@ func TestBuildJWTConfig_Audiences(t *testing.T) { assert.NotNil(t, result) assert.Equal(t, tc.expectedAudiences, result.Audiences, "audiences should match expected") + //nolint:staticcheck // SA1019: Testing backwards compatibility - Audience field must still be populated assert.Equal(t, tc.expectedAudience, result.Audience, "audience should match expected") }) } diff --git a/management/internals/shared/grpc/server.go b/management/internals/shared/grpc/server.go index 801c15158..32049d044 100644 --- a/management/internals/shared/grpc/server.go +++ b/management/internals/shared/grpc/server.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "io" "net" "net/netip" "os" @@ -26,6 +27,7 @@ import ( "github.com/netbirdio/netbird/management/internals/controllers/network_map" nbconfig "github.com/netbirdio/netbird/management/internals/server/config" "github.com/netbirdio/netbird/management/server/idp" + "github.com/netbirdio/netbird/management/server/job" "github.com/netbirdio/netbird/management/server/integrations/integrated_validator" "github.com/netbirdio/netbird/management/server/store" @@ -57,6 +59,7 @@ type Server struct { accountManager account.Manager settingsManager settings.Manager proto.UnimplementedManagementServiceServer + jobManager *job.Manager config *nbconfig.Config secretsManager SecretsManager appMetrics telemetry.AppMetrics @@ -82,6 +85,7 @@ func NewServer( config *nbconfig.Config, accountManager account.Manager, settingsManager settings.Manager, + jobManager *job.Manager, secretsManager SecretsManager, appMetrics telemetry.AppMetrics, authManager auth.Manager, @@ -114,6 +118,7 @@ func NewServer( } return &Server{ + jobManager: jobManager, accountManager: accountManager, settingsManager: settingsManager, config: config, @@ -169,6 +174,40 @@ func getRealIP(ctx context.Context) net.IP { return nil } +func (s *Server) Job(srv proto.ManagementService_JobServer) error { + reqStart := time.Now() + ctx := srv.Context() + + peerKey, err := s.handleHandshake(ctx, srv) + if err != nil { + return err + } + + accountID, err := s.accountManager.GetAccountIDForPeerKey(ctx, peerKey.String()) + if err != nil { + // nolint:staticcheck + ctx = context.WithValue(ctx, nbContext.AccountIDKey, "UNKNOWN") + log.WithContext(ctx).Tracef("peer %s is not registered", peerKey.String()) + if errStatus, ok := internalStatus.FromError(err); ok && errStatus.Type() == internalStatus.NotFound { + return status.Errorf(codes.PermissionDenied, "peer is not registered") + } + return err + } + // nolint:staticcheck + ctx = context.WithValue(ctx, nbContext.AccountIDKey, accountID) + peer, err := s.accountManager.GetStore().GetPeerByPeerPubKey(ctx, store.LockingStrengthNone, peerKey.String()) + if err != nil { + return status.Errorf(codes.Unauthenticated, "peer is not registered") + } + + s.startResponseReceiver(ctx, srv) + + updates := s.jobManager.CreateJobChannel(ctx, accountID, peer.ID) + log.WithContext(ctx).Debugf("Job: took %v", time.Since(reqStart)) + + return s.sendJobsLoop(ctx, accountID, peerKey, peer, updates, srv) +} + // Sync validates the existence of a connecting peer, sends an initial state (all available for the connecting peers) and // notifies the connected peer of any updates (e.g. new peers under the same account) func (s *Server) Sync(req *proto.EncryptedMessage, srv proto.ManagementService_SyncServer) error { @@ -193,6 +232,9 @@ func (s *Server) Sync(req *proto.EncryptedMessage, srv proto.ManagementService_S userID, err := s.accountManager.GetUserIDByPeerKey(ctx, peerKey.String()) if err != nil { s.syncSem.Add(-1) + if errStatus, ok := internalStatus.FromError(err); ok && errStatus.Type() == internalStatus.NotFound { + return status.Errorf(codes.PermissionDenied, "peer is not registered") + } return mapError(ctx, err) } @@ -289,6 +331,70 @@ func (s *Server) Sync(req *proto.EncryptedMessage, srv proto.ManagementService_S return s.handleUpdates(ctx, accountID, peerKey, peer, updates, srv) } +func (s *Server) handleHandshake(ctx context.Context, srv proto.ManagementService_JobServer) (wgtypes.Key, error) { + hello, err := srv.Recv() + if err != nil { + return wgtypes.Key{}, status.Errorf(codes.InvalidArgument, "missing hello: %v", err) + } + + jobReq := &proto.JobRequest{} + peerKey, err := s.parseRequest(ctx, hello, jobReq) + if err != nil { + return wgtypes.Key{}, err + } + + return peerKey, nil +} + +func (s *Server) startResponseReceiver(ctx context.Context, srv proto.ManagementService_JobServer) { + go func() { + for { + msg, err := srv.Recv() + if err != nil { + if errors.Is(err, io.EOF) || errors.Is(err, context.Canceled) { + return + } + log.WithContext(ctx).Warnf("recv job response error: %v", err) + return + } + + jobResp := &proto.JobResponse{} + if _, err := s.parseRequest(ctx, msg, jobResp); err != nil { + log.WithContext(ctx).Warnf("invalid job response: %v", err) + continue + } + + if err := s.jobManager.HandleResponse(ctx, jobResp, msg.WgPubKey); err != nil { + log.WithContext(ctx).Errorf("handle job response failed: %v", err) + } + } + }() +} + +func (s *Server) sendJobsLoop(ctx context.Context, accountID string, peerKey wgtypes.Key, peer *nbpeer.Peer, updates *job.Channel, srv proto.ManagementService_JobServer) error { + // todo figure out better error handling strategy + defer s.jobManager.CloseChannel(ctx, accountID, peer.ID) + + for { + event, err := updates.Event(ctx) + if err != nil { + if errors.Is(err, job.ErrJobChannelClosed) { + log.WithContext(ctx).Debugf("jobs channel for peer %s was closed", peerKey.String()) + return nil + } + + // happens when connection drops, e.g. client disconnects + log.WithContext(ctx).Debugf("stream of peer %s has been closed", peerKey.String()) + return ctx.Err() + } + + if err := s.sendJob(ctx, peerKey, event, srv); err != nil { + log.WithContext(ctx).Warnf("send job failed: %v", err) + return nil + } + } +} + // handleUpdates sends updates to the connected peer until the updates channel is closed. func (s *Server) handleUpdates(ctx context.Context, accountID string, peerKey wgtypes.Key, peer *nbpeer.Peer, updates chan *network_map.UpdateMessage, srv proto.ManagementService_SyncServer) error { log.WithContext(ctx).Tracef("starting to handle updates for peer %s", peerKey.String()) @@ -306,7 +412,6 @@ func (s *Server) handleUpdates(ctx context.Context, accountID string, peerKey wg return nil } log.WithContext(ctx).Debugf("received an update for peer %s", peerKey.String()) - if err := s.sendUpdate(ctx, accountID, peerKey, peer, update, srv); err != nil { log.WithContext(ctx).Debugf("error while sending an update to peer %s: %v", peerKey.String(), err) return err @@ -336,7 +441,7 @@ func (s *Server) sendUpdate(ctx context.Context, accountID string, peerKey wgtyp s.cancelPeerRoutines(ctx, accountID, peer) return status.Errorf(codes.Internal, "failed processing update message") } - err = srv.SendMsg(&proto.EncryptedMessage{ + err = srv.Send(&proto.EncryptedMessage{ WgPubKey: key.PublicKey().String(), Body: encryptedResp, }) @@ -348,6 +453,31 @@ func (s *Server) sendUpdate(ctx context.Context, accountID string, peerKey wgtyp return nil } +// sendJob encrypts the update message using the peer key and the server's wireguard key, +// then sends the encrypted message to the connected peer via the sync server. +func (s *Server) sendJob(ctx context.Context, peerKey wgtypes.Key, job *job.Event, srv proto.ManagementService_JobServer) error { + wgKey, err := s.secretsManager.GetWGKey() + if err != nil { + log.WithContext(ctx).Errorf("failed to get wg key for peer %s: %v", peerKey.String(), err) + return status.Errorf(codes.Internal, "failed processing job message") + } + + encryptedResp, err := encryption.EncryptMessage(peerKey, wgKey, job.Request) + if err != nil { + log.WithContext(ctx).Errorf("failed to encrypt job for peer %s: %v", peerKey.String(), err) + return status.Errorf(codes.Internal, "failed processing job message") + } + err = srv.Send(&proto.EncryptedMessage{ + WgPubKey: wgKey.PublicKey().String(), + Body: encryptedResp, + }) + if err != nil { + return status.Errorf(codes.Internal, "failed sending job message") + } + log.WithContext(ctx).Debugf("sent a job to peer: %s", peerKey.String()) + return nil +} + func (s *Server) cancelPeerRoutines(ctx context.Context, accountID string, peer *nbpeer.Peer) { unlock := s.acquirePeerLockByUID(ctx, peer.Key) defer unlock() @@ -690,8 +820,8 @@ func (s *Server) IsHealthy(ctx context.Context, req *proto.Empty) (*proto.Empty, // sendInitialSync sends initial proto.SyncResponse to the peer requesting synchronization func (s *Server) sendInitialSync(ctx context.Context, peerKey wgtypes.Key, peer *nbpeer.Peer, networkMap *types.NetworkMap, postureChecks []*posture.Checks, srv proto.ManagementService_SyncServer, dnsFwdPort int64) error { var err error - var turnToken *Token + if s.config.TURNConfig != nil && s.config.TURNConfig.TimeBasedCredentials { turnToken, err = s.secretsManager.GenerateTurnToken() if err != nil { diff --git a/management/server/account.go b/management/server/account.go index 61882411b..d453b87c3 100644 --- a/management/server/account.go +++ b/management/server/account.go @@ -15,6 +15,7 @@ import ( "sync" "time" + "github.com/netbirdio/netbird/management/server/job" "github.com/netbirdio/netbird/shared/auth" cacheStore "github.com/eko/gocache/lib/v4/store" @@ -70,6 +71,7 @@ type DefaultAccountManager struct { // cacheLoading keeps the accountIDs that are currently reloading. The accountID has to be removed once cache has been reloaded cacheLoading map[string]chan struct{} networkMapController network_map.Controller + jobManager *job.Manager idpManager idp.Manager cacheManager *nbcache.AccountUserDataCache externalCacheManager nbcache.UserDataCache @@ -178,6 +180,7 @@ func BuildManager( config *nbconfig.Config, store store.Store, networkMapController network_map.Controller, + jobManager *job.Manager, idpManager idp.Manager, singleAccountModeDomain string, eventStore activity.Store, @@ -200,6 +203,7 @@ func BuildManager( config: config, geo: geo, networkMapController: networkMapController, + jobManager: jobManager, idpManager: idpManager, ctx: context.Background(), cacheMux: sync.Mutex{}, diff --git a/management/server/account/manager.go b/management/server/account/manager.go index 7680a8464..5e9bb42a2 100644 --- a/management/server/account/manager.go +++ b/management/server/account/manager.go @@ -30,8 +30,15 @@ type Manager interface { autoGroups []string, usageLimit int, userID string, ephemeral bool, allowExtraDNSLabels bool) (*types.SetupKey, error) SaveSetupKey(ctx context.Context, accountID string, key *types.SetupKey, userID string) (*types.SetupKey, error) CreateUser(ctx context.Context, accountID, initiatorUserID string, key *types.UserInfo) (*types.UserInfo, error) + CreateUserInvite(ctx context.Context, accountID, initiatorUserID string, invite *types.UserInfo, expiresIn int) (*types.UserInvite, error) + AcceptUserInvite(ctx context.Context, token, password string) error + RegenerateUserInvite(ctx context.Context, accountID, initiatorUserID, inviteID string, expiresIn int) (*types.UserInvite, error) + GetUserInviteInfo(ctx context.Context, token string) (*types.UserInviteInfo, error) + ListUserInvites(ctx context.Context, accountID, initiatorUserID string) ([]*types.UserInvite, error) + DeleteUserInvite(ctx context.Context, accountID, initiatorUserID, inviteID string) error DeleteUser(ctx context.Context, accountID, initiatorUserID string, targetUserID string) error DeleteRegularUsers(ctx context.Context, accountID, initiatorUserID string, targetUserIDs []string, userInfos map[string]*types.UserInfo) error + UpdateUserPassword(ctx context.Context, accountID, currentUserID, targetUserID string, oldPassword, newPassword string) error InviteUser(ctx context.Context, accountID string, initiatorUserID string, targetUserID string) error ApproveUser(ctx context.Context, accountID, initiatorUserID, targetUserID string) (*types.UserInfo, error) RejectUser(ctx context.Context, accountID, initiatorUserID, targetUserID string) error @@ -129,4 +136,7 @@ type Manager interface { CreateIdentityProvider(ctx context.Context, accountID, userID string, idp *types.IdentityProvider) (*types.IdentityProvider, error) UpdateIdentityProvider(ctx context.Context, accountID, idpID, userID string, idp *types.IdentityProvider) (*types.IdentityProvider, error) DeleteIdentityProvider(ctx context.Context, accountID, idpID, userID string) error + CreatePeerJob(ctx context.Context, accountID, peerID, userID string, job *types.Job) error + GetAllPeerJobs(ctx context.Context, accountID, userID, peerID string) ([]*types.Job, error) + GetPeerJobByID(ctx context.Context, accountID, userID, peerID, jobID string) (*types.Job, error) } diff --git a/management/server/account_test.go b/management/server/account_test.go index 3279a373b..86cc69e8b 100644 --- a/management/server/account_test.go +++ b/management/server/account_test.go @@ -35,6 +35,7 @@ import ( "github.com/netbirdio/netbird/management/server/http/testing/testing_tools" "github.com/netbirdio/netbird/management/server/idp" "github.com/netbirdio/netbird/management/server/integrations/port_forwarding" + "github.com/netbirdio/netbird/management/server/job" resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" routerTypes "github.com/netbirdio/netbird/management/server/networks/routers/types" networkTypes "github.com/netbirdio/netbird/management/server/networks/types" @@ -3023,13 +3024,14 @@ func createManager(t testing.TB) (*DefaultAccountManager, *update_channel.PeersU AnyTimes() permissionsManager := permissions.NewManager(store) + peersManager := peers.NewManager(store, permissionsManager) ctx := context.Background() updateManager := update_channel.NewPeersUpdateManager(metrics) requestBuffer := NewAccountRequestBuffer(ctx, store) networkMapController := controller.NewController(ctx, store, metrics, updateManager, requestBuffer, MockIntegratedValidator{}, settingsMockManager, "netbird.cloud", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(store, peers.NewManager(store, permissionsManager)), &config.Config{}) - manager, err := BuildManager(ctx, &config.Config{}, store, networkMapController, nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) + manager, err := BuildManager(ctx, &config.Config{}, store, networkMapController, job.NewJobManager(nil, store, peersManager), nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) if err != nil { return nil, nil, err } diff --git a/management/server/activity/codes.go b/management/server/activity/codes.go index 6254c02db..e1b7e5300 100644 --- a/management/server/activity/codes.go +++ b/management/server/activity/codes.go @@ -195,9 +195,18 @@ const ( DNSRecordUpdated Activity = 100 DNSRecordDeleted Activity = 101 - ServiceCreated Activity = 102 - ServiceUpdated Activity = 103 - ServiceDeleted Activity = 104 + JobCreatedByUser Activity = 102 + + UserPasswordChanged Activity = 103 + + UserInviteLinkCreated Activity = 104 + UserInviteLinkAccepted Activity = 105 + UserInviteLinkRegenerated Activity = 106 + UserInviteLinkDeleted Activity = 107 + + ServiceCreated Activity = 108 + ServiceUpdated Activity = 109 + ServiceDeleted Activity = 110 AccountDeleted Activity = 99999 ) @@ -324,6 +333,15 @@ var activityMap = map[Activity]Code{ DNSRecordUpdated: {"DNS zone record updated", "dns.zone.record.update"}, DNSRecordDeleted: {"DNS zone record deleted", "dns.zone.record.delete"}, + JobCreatedByUser: {"Create Job for peer", "peer.job.create"}, + + UserPasswordChanged: {"User password changed", "user.password.change"}, + + UserInviteLinkCreated: {"User invite link created", "user.invite.link.create"}, + UserInviteLinkAccepted: {"User invite link accepted", "user.invite.link.accept"}, + UserInviteLinkRegenerated: {"User invite link regenerated", "user.invite.link.regenerate"}, + UserInviteLinkDeleted: {"User invite link deleted", "user.invite.link.delete"}, + ServiceCreated: {"Service created", "service.create"}, ServiceUpdated: {"Service updated", "service.update"}, ServiceDeleted: {"Service deleted", "service.delete"}, diff --git a/management/server/activity/store/crypt.go b/management/server/activity/store/crypt.go deleted file mode 100644 index ce97347d4..000000000 --- a/management/server/activity/store/crypt.go +++ /dev/null @@ -1,136 +0,0 @@ -package store - -import ( - "bytes" - "crypto/aes" - "crypto/cipher" - "crypto/rand" - "encoding/base64" - "errors" -) - -var iv = []byte{10, 22, 13, 79, 05, 8, 52, 91, 87, 98, 88, 98, 35, 25, 13, 05} - -type FieldEncrypt struct { - block cipher.Block - gcm cipher.AEAD -} - -func GenerateKey() (string, error) { - key := make([]byte, 32) - _, err := rand.Read(key) - if err != nil { - return "", err - } - readableKey := base64.StdEncoding.EncodeToString(key) - return readableKey, nil -} - -func NewFieldEncrypt(key string) (*FieldEncrypt, error) { - binKey, err := base64.StdEncoding.DecodeString(key) - if err != nil { - return nil, err - } - - block, err := aes.NewCipher(binKey) - if err != nil { - return nil, err - } - - gcm, err := cipher.NewGCM(block) - if err != nil { - return nil, err - } - - ec := &FieldEncrypt{ - block: block, - gcm: gcm, - } - - return ec, nil -} - -func (ec *FieldEncrypt) LegacyEncrypt(payload string) string { - plainText := pkcs5Padding([]byte(payload)) - cipherText := make([]byte, len(plainText)) - cbc := cipher.NewCBCEncrypter(ec.block, iv) - cbc.CryptBlocks(cipherText, plainText) - return base64.StdEncoding.EncodeToString(cipherText) -} - -// Encrypt encrypts plaintext using AES-GCM -func (ec *FieldEncrypt) Encrypt(payload string) (string, error) { - plaintext := []byte(payload) - nonceSize := ec.gcm.NonceSize() - - nonce := make([]byte, nonceSize, len(plaintext)+nonceSize+ec.gcm.Overhead()) - if _, err := rand.Read(nonce); err != nil { - return "", err - } - - ciphertext := ec.gcm.Seal(nonce, nonce, plaintext, nil) - - return base64.StdEncoding.EncodeToString(ciphertext), nil -} - -func (ec *FieldEncrypt) LegacyDecrypt(data string) (string, error) { - cipherText, err := base64.StdEncoding.DecodeString(data) - if err != nil { - return "", err - } - cbc := cipher.NewCBCDecrypter(ec.block, iv) - cbc.CryptBlocks(cipherText, cipherText) - payload, err := pkcs5UnPadding(cipherText) - if err != nil { - return "", err - } - - return string(payload), nil -} - -// Decrypt decrypts ciphertext using AES-GCM -func (ec *FieldEncrypt) Decrypt(data string) (string, error) { - cipherText, err := base64.StdEncoding.DecodeString(data) - if err != nil { - return "", err - } - - nonceSize := ec.gcm.NonceSize() - if len(cipherText) < nonceSize { - return "", errors.New("cipher text too short") - } - - nonce, cipherText := cipherText[:nonceSize], cipherText[nonceSize:] - plainText, err := ec.gcm.Open(nil, nonce, cipherText, nil) - if err != nil { - return "", err - } - - return string(plainText), nil -} - -func pkcs5Padding(ciphertext []byte) []byte { - padding := aes.BlockSize - len(ciphertext)%aes.BlockSize - padText := bytes.Repeat([]byte{byte(padding)}, padding) - return append(ciphertext, padText...) -} -func pkcs5UnPadding(src []byte) ([]byte, error) { - srcLen := len(src) - if srcLen == 0 { - return nil, errors.New("input data is empty") - } - - paddingLen := int(src[srcLen-1]) - if paddingLen == 0 || paddingLen > aes.BlockSize || paddingLen > srcLen { - return nil, errors.New("invalid padding size") - } - - // Verify that all padding bytes are the same - for i := 0; i < paddingLen; i++ { - if src[srcLen-1-i] != byte(paddingLen) { - return nil, errors.New("invalid padding") - } - } - - return src[:srcLen-paddingLen], nil -} diff --git a/management/server/activity/store/crypt_test.go b/management/server/activity/store/crypt_test.go deleted file mode 100644 index 700bbcd6b..000000000 --- a/management/server/activity/store/crypt_test.go +++ /dev/null @@ -1,310 +0,0 @@ -package store - -import ( - "bytes" - "testing" -) - -func TestGenerateKey(t *testing.T) { - testData := "exampl@netbird.io" - key, err := GenerateKey() - if err != nil { - t.Fatalf("failed to generate key: %s", err) - } - ee, err := NewFieldEncrypt(key) - if err != nil { - t.Fatalf("failed to init email encryption: %s", err) - } - - encrypted, err := ee.Encrypt(testData) - if err != nil { - t.Fatalf("failed to encrypt data: %s", err) - } - - if encrypted == "" { - t.Fatalf("invalid encrypted text") - } - - decrypted, err := ee.Decrypt(encrypted) - if err != nil { - t.Fatalf("failed to decrypt data: %s", err) - } - - if decrypted != testData { - t.Fatalf("decrypted data is not match with test data: %s, %s", testData, decrypted) - } -} - -func TestGenerateKeyLegacy(t *testing.T) { - testData := "exampl@netbird.io" - key, err := GenerateKey() - if err != nil { - t.Fatalf("failed to generate key: %s", err) - } - ee, err := NewFieldEncrypt(key) - if err != nil { - t.Fatalf("failed to init email encryption: %s", err) - } - - encrypted := ee.LegacyEncrypt(testData) - if encrypted == "" { - t.Fatalf("invalid encrypted text") - } - - decrypted, err := ee.LegacyDecrypt(encrypted) - if err != nil { - t.Fatalf("failed to decrypt data: %s", err) - } - - if decrypted != testData { - t.Fatalf("decrypted data is not match with test data: %s, %s", testData, decrypted) - } -} - -func TestCorruptKey(t *testing.T) { - testData := "exampl@netbird.io" - key, err := GenerateKey() - if err != nil { - t.Fatalf("failed to generate key: %s", err) - } - ee, err := NewFieldEncrypt(key) - if err != nil { - t.Fatalf("failed to init email encryption: %s", err) - } - - encrypted, err := ee.Encrypt(testData) - if err != nil { - t.Fatalf("failed to encrypt data: %s", err) - } - - if encrypted == "" { - t.Fatalf("invalid encrypted text") - } - - newKey, err := GenerateKey() - if err != nil { - t.Fatalf("failed to generate key: %s", err) - } - - ee, err = NewFieldEncrypt(newKey) - if err != nil { - t.Fatalf("failed to init email encryption: %s", err) - } - - res, _ := ee.Decrypt(encrypted) - if res == testData { - t.Fatalf("incorrect decryption, the result is: %s", res) - } -} - -func TestEncryptDecrypt(t *testing.T) { - // Generate a key for encryption/decryption - key, err := GenerateKey() - if err != nil { - t.Fatalf("Failed to generate key: %v", err) - } - - // Initialize the FieldEncrypt with the generated key - ec, err := NewFieldEncrypt(key) - if err != nil { - t.Fatalf("Failed to create FieldEncrypt: %v", err) - } - - // Test cases - testCases := []struct { - name string - input string - }{ - { - name: "Empty String", - input: "", - }, - { - name: "Short String", - input: "Hello", - }, - { - name: "String with Spaces", - input: "Hello, World!", - }, - { - name: "Long String", - input: "The quick brown fox jumps over the lazy dog.", - }, - { - name: "Unicode Characters", - input: "こんにちは世界", - }, - { - name: "Special Characters", - input: "!@#$%^&*()_+-=[]{}|;':\",./<>?", - }, - { - name: "Numeric String", - input: "1234567890", - }, - { - name: "Repeated Characters", - input: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", - }, - { - name: "Multi-block String", - input: "This is a longer string that will span multiple blocks in the encryption algorithm.", - }, - { - name: "Non-ASCII and ASCII Mix", - input: "Hello 世界 123", - }, - } - - for _, tc := range testCases { - t.Run(tc.name+" - Legacy", func(t *testing.T) { - // Legacy Encryption - encryptedLegacy := ec.LegacyEncrypt(tc.input) - if encryptedLegacy == "" { - t.Errorf("LegacyEncrypt returned empty string for input '%s'", tc.input) - } - - // Legacy Decryption - decryptedLegacy, err := ec.LegacyDecrypt(encryptedLegacy) - if err != nil { - t.Errorf("LegacyDecrypt failed for input '%s': %v", tc.input, err) - } - - // Verify that the decrypted value matches the original input - if decryptedLegacy != tc.input { - t.Errorf("LegacyDecrypt output '%s' does not match original input '%s'", decryptedLegacy, tc.input) - } - }) - - t.Run(tc.name+" - New", func(t *testing.T) { - // New Encryption - encryptedNew, err := ec.Encrypt(tc.input) - if err != nil { - t.Errorf("Encrypt failed for input '%s': %v", tc.input, err) - } - if encryptedNew == "" { - t.Errorf("Encrypt returned empty string for input '%s'", tc.input) - } - - // New Decryption - decryptedNew, err := ec.Decrypt(encryptedNew) - if err != nil { - t.Errorf("Decrypt failed for input '%s': %v", tc.input, err) - } - - // Verify that the decrypted value matches the original input - if decryptedNew != tc.input { - t.Errorf("Decrypt output '%s' does not match original input '%s'", decryptedNew, tc.input) - } - }) - } -} - -func TestPKCS5UnPadding(t *testing.T) { - tests := []struct { - name string - input []byte - expected []byte - expectError bool - }{ - { - name: "Valid Padding", - input: append([]byte("Hello, World!"), bytes.Repeat([]byte{4}, 4)...), - expected: []byte("Hello, World!"), - }, - { - name: "Empty Input", - input: []byte{}, - expectError: true, - }, - { - name: "Padding Length Zero", - input: append([]byte("Hello, World!"), bytes.Repeat([]byte{0}, 4)...), - expectError: true, - }, - { - name: "Padding Length Exceeds Block Size", - input: append([]byte("Hello, World!"), bytes.Repeat([]byte{17}, 17)...), - expectError: true, - }, - { - name: "Padding Length Exceeds Input Length", - input: []byte{5, 5, 5}, - expectError: true, - }, - { - name: "Invalid Padding Bytes", - input: append([]byte("Hello, World!"), []byte{2, 3, 4, 5}...), - expectError: true, - }, - { - name: "Valid Single Byte Padding", - input: append([]byte("Hello, World!"), byte(1)), - expected: []byte("Hello, World!"), - }, - { - name: "Invalid Mixed Padding Bytes", - input: append([]byte("Hello, World!"), []byte{3, 3, 2}...), - expectError: true, - }, - { - name: "Valid Full Block Padding", - input: append([]byte("Hello, World!"), bytes.Repeat([]byte{16}, 16)...), - expected: []byte("Hello, World!"), - }, - { - name: "Non-Padding Byte at End", - input: append([]byte("Hello, World!"), []byte{4, 4, 4, 5}...), - expectError: true, - }, - { - name: "Valid Padding with Different Text Length", - input: append([]byte("Test"), bytes.Repeat([]byte{12}, 12)...), - expected: []byte("Test"), - }, - { - name: "Padding Length Equal to Input Length", - input: bytes.Repeat([]byte{8}, 8), - expected: []byte{}, - }, - { - name: "Invalid Padding Length Zero (Again)", - input: append([]byte("Test"), byte(0)), - expectError: true, - }, - { - name: "Padding Length Greater Than Input", - input: []byte{10}, - expectError: true, - }, - { - name: "Input Length Not Multiple of Block Size", - input: append([]byte("Invalid Length"), byte(1)), - expected: []byte("Invalid Length"), - }, - { - name: "Valid Padding with Non-ASCII Characters", - input: append([]byte("こんにちは"), bytes.Repeat([]byte{2}, 2)...), - expected: []byte("こんにちは"), - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := pkcs5UnPadding(tt.input) - if tt.expectError { - if err == nil { - t.Errorf("Expected error but got nil") - } - } else { - if err != nil { - t.Errorf("Did not expect error but got: %v", err) - } - if !bytes.Equal(result, tt.expected) { - t.Errorf("Expected output %v, got %v", tt.expected, result) - } - } - }) - } -} diff --git a/management/server/activity/store/migration.go b/management/server/activity/store/migration.go index af19a34eb..d0f165d5f 100644 --- a/management/server/activity/store/migration.go +++ b/management/server/activity/store/migration.go @@ -10,9 +10,10 @@ import ( "github.com/netbirdio/netbird/management/server/activity" "github.com/netbirdio/netbird/management/server/migration" + "github.com/netbirdio/netbird/util/crypt" ) -func migrate(ctx context.Context, crypt *FieldEncrypt, db *gorm.DB) error { +func migrate(ctx context.Context, crypt *crypt.FieldEncrypt, db *gorm.DB) error { migrations := getMigrations(ctx, crypt) for _, m := range migrations { @@ -26,7 +27,7 @@ func migrate(ctx context.Context, crypt *FieldEncrypt, db *gorm.DB) error { type migrationFunc func(*gorm.DB) error -func getMigrations(ctx context.Context, crypt *FieldEncrypt) []migrationFunc { +func getMigrations(ctx context.Context, crypt *crypt.FieldEncrypt) []migrationFunc { return []migrationFunc{ func(db *gorm.DB) error { return migration.MigrateNewField[activity.DeletedUser](ctx, db, "name", "") @@ -45,7 +46,7 @@ func getMigrations(ctx context.Context, crypt *FieldEncrypt) []migrationFunc { // migrateLegacyEncryptedUsersToGCM migrates previously encrypted data using // legacy CBC encryption with a static IV to the new GCM encryption method. -func migrateLegacyEncryptedUsersToGCM(ctx context.Context, db *gorm.DB, crypt *FieldEncrypt) error { +func migrateLegacyEncryptedUsersToGCM(ctx context.Context, db *gorm.DB, crypt *crypt.FieldEncrypt) error { model := &activity.DeletedUser{} if !db.Migrator().HasTable(model) { @@ -80,7 +81,7 @@ func migrateLegacyEncryptedUsersToGCM(ctx context.Context, db *gorm.DB, crypt *F return nil } -func updateDeletedUserData(transaction *gorm.DB, user activity.DeletedUser, crypt *FieldEncrypt) error { +func updateDeletedUserData(transaction *gorm.DB, user activity.DeletedUser, crypt *crypt.FieldEncrypt) error { var err error var decryptedEmail, decryptedName string diff --git a/management/server/activity/store/migration_test.go b/management/server/activity/store/migration_test.go index e3261d9fa..5c6f5ade8 100644 --- a/management/server/activity/store/migration_test.go +++ b/management/server/activity/store/migration_test.go @@ -12,6 +12,7 @@ import ( "github.com/netbirdio/netbird/management/server/activity" "github.com/netbirdio/netbird/management/server/migration" "github.com/netbirdio/netbird/management/server/testutil" + "github.com/netbirdio/netbird/util/crypt" ) const ( @@ -40,10 +41,10 @@ func setupDatabase(t *testing.T) *gorm.DB { func TestMigrateLegacyEncryptedUsersToGCM(t *testing.T) { db := setupDatabase(t) - key, err := GenerateKey() + key, err := crypt.GenerateKey() require.NoError(t, err, "Failed to generate key") - crypt, err := NewFieldEncrypt(key) + crypt, err := crypt.NewFieldEncrypt(key) require.NoError(t, err, "Failed to initialize FieldEncrypt") t.Run("empty table, no migration required", func(t *testing.T) { diff --git a/management/server/activity/store/sql_store.go b/management/server/activity/store/sql_store.go index ffecb6b8f..db614d0cd 100644 --- a/management/server/activity/store/sql_store.go +++ b/management/server/activity/store/sql_store.go @@ -18,6 +18,7 @@ import ( "github.com/netbirdio/netbird/management/server/activity" "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/util/crypt" ) const ( @@ -45,12 +46,12 @@ type eventWithNames struct { // Store is the implementation of the activity.Store interface backed by SQLite type Store struct { db *gorm.DB - fieldEncrypt *FieldEncrypt + fieldEncrypt *crypt.FieldEncrypt } // NewSqlStore creates a new Store with an event table if not exists. func NewSqlStore(ctx context.Context, dataDir string, encryptionKey string) (*Store, error) { - crypt, err := NewFieldEncrypt(encryptionKey) + fieldEncrypt, err := crypt.NewFieldEncrypt(encryptionKey) if err != nil { return nil, err @@ -61,7 +62,7 @@ func NewSqlStore(ctx context.Context, dataDir string, encryptionKey string) (*St return nil, fmt.Errorf("initialize database: %w", err) } - if err = migrate(ctx, crypt, db); err != nil { + if err = migrate(ctx, fieldEncrypt, db); err != nil { return nil, fmt.Errorf("events database migration: %w", err) } @@ -72,7 +73,7 @@ func NewSqlStore(ctx context.Context, dataDir string, encryptionKey string) (*St return &Store{ db: db, - fieldEncrypt: crypt, + fieldEncrypt: fieldEncrypt, }, nil } diff --git a/management/server/activity/store/sql_store_test.go b/management/server/activity/store/sql_store_test.go index 8c0d159df..d723f1623 100644 --- a/management/server/activity/store/sql_store_test.go +++ b/management/server/activity/store/sql_store_test.go @@ -9,11 +9,12 @@ import ( "github.com/stretchr/testify/assert" "github.com/netbirdio/netbird/management/server/activity" + "github.com/netbirdio/netbird/util/crypt" ) func TestNewSqlStore(t *testing.T) { dataDir := t.TempDir() - key, _ := GenerateKey() + key, _ := crypt.GenerateKey() store, err := NewSqlStore(context.Background(), dataDir, key) if err != nil { t.Fatal(err) diff --git a/management/server/dns_test.go b/management/server/dns_test.go index d1da79380..bd0755d0d 100644 --- a/management/server/dns_test.go +++ b/management/server/dns_test.go @@ -16,6 +16,7 @@ import ( ephemeral_manager "github.com/netbirdio/netbird/management/internals/modules/peers/ephemeral/manager" "github.com/netbirdio/netbird/management/internals/server/config" "github.com/netbirdio/netbird/management/server/integrations/port_forwarding" + "github.com/netbirdio/netbird/management/server/job" "github.com/netbirdio/netbird/management/server/permissions" "github.com/netbirdio/netbird/management/server/settings" "github.com/netbirdio/netbird/management/server/store" @@ -221,13 +222,14 @@ func createDNSManager(t *testing.T) (*DefaultAccountManager, error) { // return empty extra settings for expected calls to UpdateAccountPeers settingsMockManager.EXPECT().GetExtraSettings(gomock.Any(), gomock.Any()).Return(&types.ExtraSettings{}, nil).AnyTimes() permissionsManager := permissions.NewManager(store) + peersManager := peers.NewManager(store, permissionsManager) ctx := context.Background() updateManager := update_channel.NewPeersUpdateManager(metrics) requestBuffer := NewAccountRequestBuffer(ctx, store) networkMapController := controller.NewController(ctx, store, metrics, updateManager, requestBuffer, MockIntegratedValidator{}, settingsMockManager, "netbird.test", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(store, peers.NewManager(store, permissionsManager)), &config.Config{}) - return BuildManager(context.Background(), nil, store, networkMapController, nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) + return BuildManager(context.Background(), nil, store, networkMapController, job.NewJobManager(nil, store, peersManager), nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) } func createDNSStore(t *testing.T) (store.Store, error) { diff --git a/management/server/http/handler.go b/management/server/http/handler.go index ceb3d931c..3615f0dd0 100644 --- a/management/server/http/handler.go +++ b/management/server/http/handler.go @@ -71,6 +71,13 @@ func NewAPIHandler(ctx context.Context, accountManager account.Manager, networks if err := bypass.AddBypassPath("/api/setup"); err != nil { return nil, fmt.Errorf("failed to add bypass path: %w", err) } + // Public invite endpoints (tokens start with nbi_) + if err := bypass.AddBypassPath("/api/users/invites/nbi_*"); err != nil { + return nil, fmt.Errorf("failed to add bypass path: %w", err) + } + if err := bypass.AddBypassPath("/api/users/invites/nbi_*/accept"); err != nil { + return nil, fmt.Errorf("failed to add bypass path: %w", err) + } var rateLimitingConfig *middleware.RateLimiterConfig if os.Getenv(rateLimitingEnabledKey) == "true" { @@ -135,6 +142,8 @@ func NewAPIHandler(ctx context.Context, accountManager account.Manager, networks accounts.AddEndpoints(accountManager, settingsManager, embeddedIdpEnabled, router) peers.AddEndpoints(accountManager, router, networkMapController) users.AddEndpoints(accountManager, router) + users.AddInvitesEndpoints(accountManager, router) + users.AddPublicInvitesEndpoints(accountManager, router) setup_keys.AddEndpoints(accountManager, router) policies.AddEndpoints(accountManager, LocationManager, router) policies.AddPostureCheckEndpoints(accountManager, LocationManager, router) @@ -148,6 +157,7 @@ func NewAPIHandler(ctx context.Context, accountManager account.Manager, networks recordsManager.RegisterEndpoints(router, rManager) idp.AddEndpoints(accountManager, router) instance.AddEndpoints(instanceManager, router) + instance.AddVersionEndpoint(instanceManager, router) services.RegisterEndpoints(router, serviceManager) // Mount embedded IdP handler at /oauth2 path if configured diff --git a/management/server/http/handlers/instance/instance_handler.go b/management/server/http/handlers/instance/instance_handler.go index 889c3133e..5d8baaf8d 100644 --- a/management/server/http/handlers/instance/instance_handler.go +++ b/management/server/http/handlers/instance/instance_handler.go @@ -28,6 +28,15 @@ func AddEndpoints(instanceManager nbinstance.Manager, router *mux.Router) { router.HandleFunc("/setup", h.setup).Methods("POST", "OPTIONS") } +// AddVersionEndpoint registers the authenticated version endpoint. +func AddVersionEndpoint(instanceManager nbinstance.Manager, router *mux.Router) { + h := &handler{ + instanceManager: instanceManager, + } + + router.HandleFunc("/instance/version", h.getVersionInfo).Methods("GET", "OPTIONS") +} + // getInstanceStatus returns the instance status including whether setup is required. // This endpoint is unauthenticated. func (h *handler) getInstanceStatus(w http.ResponseWriter, r *http.Request) { @@ -65,3 +74,29 @@ func (h *handler) setup(w http.ResponseWriter, r *http.Request) { Email: userData.Email, }) } + +// getVersionInfo returns version information for NetBird components. +// This endpoint requires authentication. +func (h *handler) getVersionInfo(w http.ResponseWriter, r *http.Request) { + versionInfo, err := h.instanceManager.GetVersionInfo(r.Context()) + if err != nil { + log.WithContext(r.Context()).Errorf("failed to get version info: %v", err) + util.WriteErrorResponse("failed to get version info", http.StatusInternalServerError, w) + return + } + + resp := api.InstanceVersionInfo{ + ManagementCurrentVersion: versionInfo.CurrentVersion, + ManagementUpdateAvailable: versionInfo.ManagementUpdateAvailable, + } + + if versionInfo.DashboardVersion != "" { + resp.DashboardAvailableVersion = &versionInfo.DashboardVersion + } + + if versionInfo.ManagementVersion != "" { + resp.ManagementAvailableVersion = &versionInfo.ManagementVersion + } + + util.WriteJSONObject(r.Context(), w, resp) +} diff --git a/management/server/http/handlers/instance/instance_handler_test.go b/management/server/http/handlers/instance/instance_handler_test.go index 7a3a2bc88..470079c85 100644 --- a/management/server/http/handlers/instance/instance_handler_test.go +++ b/management/server/http/handlers/instance/instance_handler_test.go @@ -25,6 +25,7 @@ type mockInstanceManager struct { isSetupRequired bool isSetupRequiredFn func(ctx context.Context) (bool, error) createOwnerUserFn func(ctx context.Context, email, password, name string) (*idp.UserData, error) + getVersionInfoFn func(ctx context.Context) (*nbinstance.VersionInfo, error) } func (m *mockInstanceManager) IsSetupRequired(ctx context.Context) (bool, error) { @@ -66,6 +67,18 @@ func (m *mockInstanceManager) CreateOwnerUser(ctx context.Context, email, passwo }, nil } +func (m *mockInstanceManager) GetVersionInfo(ctx context.Context) (*nbinstance.VersionInfo, error) { + if m.getVersionInfoFn != nil { + return m.getVersionInfoFn(ctx) + } + return &nbinstance.VersionInfo{ + CurrentVersion: "0.34.0", + DashboardVersion: "2.0.0", + ManagementVersion: "0.35.0", + ManagementUpdateAvailable: true, + }, nil +} + var _ nbinstance.Manager = (*mockInstanceManager)(nil) func setupTestRouter(manager nbinstance.Manager) *mux.Router { @@ -279,3 +292,44 @@ func TestSetup_ManagerError(t *testing.T) { assert.Equal(t, http.StatusInternalServerError, rec.Code) } + +func TestGetVersionInfo_Success(t *testing.T) { + manager := &mockInstanceManager{} + router := mux.NewRouter() + AddVersionEndpoint(manager, router) + + req := httptest.NewRequest(http.MethodGet, "/instance/version", nil) + rec := httptest.NewRecorder() + + router.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusOK, rec.Code) + + var response api.InstanceVersionInfo + err := json.NewDecoder(rec.Body).Decode(&response) + require.NoError(t, err) + + assert.Equal(t, "0.34.0", response.ManagementCurrentVersion) + assert.NotNil(t, response.DashboardAvailableVersion) + assert.Equal(t, "2.0.0", *response.DashboardAvailableVersion) + assert.NotNil(t, response.ManagementAvailableVersion) + assert.Equal(t, "0.35.0", *response.ManagementAvailableVersion) + assert.True(t, response.ManagementUpdateAvailable) +} + +func TestGetVersionInfo_Error(t *testing.T) { + manager := &mockInstanceManager{ + getVersionInfoFn: func(ctx context.Context) (*nbinstance.VersionInfo, error) { + return nil, errors.New("failed to fetch versions") + }, + } + router := mux.NewRouter() + AddVersionEndpoint(manager, router) + + req := httptest.NewRequest(http.MethodGet, "/instance/version", nil) + rec := httptest.NewRecorder() + + router.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusInternalServerError, rec.Code) +} diff --git a/management/server/http/handlers/peers/peers_handler.go b/management/server/http/handlers/peers/peers_handler.go index b8fb3ea36..53d8ab055 100644 --- a/management/server/http/handlers/peers/peers_handler.go +++ b/management/server/http/handlers/peers/peers_handler.go @@ -36,6 +36,9 @@ func AddEndpoints(accountManager account.Manager, router *mux.Router, networkMap Methods("GET", "PUT", "DELETE", "OPTIONS") router.HandleFunc("/peers/{peerId}/accessible-peers", peersHandler.GetAccessiblePeers).Methods("GET", "OPTIONS") router.HandleFunc("/peers/{peerId}/temporary-access", peersHandler.CreateTemporaryAccess).Methods("POST", "OPTIONS") + router.HandleFunc("/peers/{peerId}/jobs", peersHandler.ListJobs).Methods("GET", "OPTIONS") + router.HandleFunc("/peers/{peerId}/jobs", peersHandler.CreateJob).Methods("POST", "OPTIONS") + router.HandleFunc("/peers/{peerId}/jobs/{jobId}", peersHandler.GetJob).Methods("GET", "OPTIONS") } // NewHandler creates a new peers Handler @@ -46,6 +49,99 @@ func NewHandler(accountManager account.Manager, networkMapController network_map } } +func (h *Handler) CreateJob(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + userAuth, err := nbcontext.GetUserAuthFromContext(ctx) + if err != nil { + util.WriteError(ctx, err, w) + return + } + + vars := mux.Vars(r) + peerID := vars["peerId"] + + req := &api.JobRequest{} + if err := json.NewDecoder(r.Body).Decode(req); err != nil { + util.WriteErrorResponse("couldn't parse JSON request", http.StatusBadRequest, w) + return + } + + job, err := types.NewJob(userAuth.UserId, userAuth.AccountId, peerID, req) + if err != nil { + util.WriteError(ctx, err, w) + return + } + if err := h.accountManager.CreatePeerJob(ctx, userAuth.AccountId, peerID, userAuth.UserId, job); err != nil { + util.WriteError(ctx, err, w) + return + } + + resp, err := toSingleJobResponse(job) + if err != nil { + util.WriteError(ctx, err, w) + return + } + + util.WriteJSONObject(ctx, w, resp) +} + +func (h *Handler) ListJobs(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + userAuth, err := nbcontext.GetUserAuthFromContext(ctx) + if err != nil { + util.WriteError(ctx, err, w) + return + } + + vars := mux.Vars(r) + peerID := vars["peerId"] + + jobs, err := h.accountManager.GetAllPeerJobs(ctx, userAuth.AccountId, userAuth.UserId, peerID) + if err != nil { + util.WriteError(ctx, err, w) + return + } + + respBody := make([]*api.JobResponse, 0, len(jobs)) + for _, job := range jobs { + resp, err := toSingleJobResponse(job) + if err != nil { + util.WriteError(ctx, err, w) + return + } + respBody = append(respBody, resp) + } + + util.WriteJSONObject(ctx, w, respBody) +} + +func (h *Handler) GetJob(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + userAuth, err := nbcontext.GetUserAuthFromContext(ctx) + if err != nil { + util.WriteError(ctx, err, w) + return + } + + vars := mux.Vars(r) + peerID := vars["peerId"] + jobID := vars["jobId"] + + job, err := h.accountManager.GetPeerJobByID(ctx, userAuth.AccountId, userAuth.UserId, peerID, jobID) + if err != nil { + util.WriteError(ctx, err, w) + return + } + + resp, err := toSingleJobResponse(job) + if err != nil { + util.WriteError(ctx, err, w) + return + } + + util.WriteJSONObject(ctx, w, resp) +} + func (h *Handler) getPeer(ctx context.Context, accountID, peerID, userID string, w http.ResponseWriter) { peer, err := h.accountManager.GetPeer(ctx, accountID, peerID, userID) if err != nil { @@ -521,6 +617,28 @@ func toPeerListItemResponse(peer *nbpeer.Peer, groupsInfo []api.GroupMinimum, dn } } +func toSingleJobResponse(job *types.Job) (*api.JobResponse, error) { + workload, err := job.BuildWorkloadResponse() + if err != nil { + return nil, err + } + + var failed *string + if job.FailedReason != "" { + failed = &job.FailedReason + } + + return &api.JobResponse{ + Id: job.ID, + CreatedAt: job.CreatedAt, + CompletedAt: job.CompletedAt, + TriggeredBy: job.TriggeredBy, + Status: api.JobResponseStatus(job.Status), + FailedReason: failed, + Workload: *workload, + }, nil +} + func fqdn(peer *nbpeer.Peer, dnsDomain string) string { fqdn := peer.FQDN(dnsDomain) if fqdn == "" { diff --git a/management/server/http/handlers/users/invites_handler.go b/management/server/http/handlers/users/invites_handler.go new file mode 100644 index 000000000..0f0f57c29 --- /dev/null +++ b/management/server/http/handlers/users/invites_handler.go @@ -0,0 +1,263 @@ +package users + +import ( + "encoding/json" + "errors" + "io" + "net/http" + "time" + + "github.com/gorilla/mux" + + "github.com/netbirdio/netbird/management/server/account" + nbcontext "github.com/netbirdio/netbird/management/server/context" + "github.com/netbirdio/netbird/management/server/http/middleware" + "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/shared/management/http/api" + "github.com/netbirdio/netbird/shared/management/http/util" + "github.com/netbirdio/netbird/shared/management/status" +) + +// publicInviteRateLimiter limits public invite requests by IP address to prevent brute-force attacks +var publicInviteRateLimiter = middleware.NewAPIRateLimiter(&middleware.RateLimiterConfig{ + RequestsPerMinute: 10, // 10 attempts per minute per IP + Burst: 5, // Allow burst of 5 requests + CleanupInterval: 10 * time.Minute, + LimiterTTL: 30 * time.Minute, +}) + +// toUserInviteResponse converts a UserInvite to an API response. +func toUserInviteResponse(invite *types.UserInvite) api.UserInvite { + autoGroups := invite.UserInfo.AutoGroups + if autoGroups == nil { + autoGroups = []string{} + } + var inviteLink *string + if invite.InviteToken != "" { + inviteLink = &invite.InviteToken + } + return api.UserInvite{ + Id: invite.UserInfo.ID, + Email: invite.UserInfo.Email, + Name: invite.UserInfo.Name, + Role: invite.UserInfo.Role, + AutoGroups: autoGroups, + ExpiresAt: invite.InviteExpiresAt.UTC(), + CreatedAt: invite.InviteCreatedAt.UTC(), + Expired: time.Now().After(invite.InviteExpiresAt), + InviteToken: inviteLink, + } +} + +// invitesHandler handles user invite operations +type invitesHandler struct { + accountManager account.Manager +} + +// AddInvitesEndpoints registers invite-related endpoints +func AddInvitesEndpoints(accountManager account.Manager, router *mux.Router) { + h := &invitesHandler{accountManager: accountManager} + + // Authenticated endpoints (require admin) + router.HandleFunc("/users/invites", h.listInvites).Methods("GET", "OPTIONS") + router.HandleFunc("/users/invites", h.createInvite).Methods("POST", "OPTIONS") + router.HandleFunc("/users/invites/{inviteId}", h.deleteInvite).Methods("DELETE", "OPTIONS") + router.HandleFunc("/users/invites/{inviteId}/regenerate", h.regenerateInvite).Methods("POST", "OPTIONS") +} + +// AddPublicInvitesEndpoints registers public (unauthenticated) invite endpoints with rate limiting +func AddPublicInvitesEndpoints(accountManager account.Manager, router *mux.Router) { + h := &invitesHandler{accountManager: accountManager} + + // Create a subrouter for public invite endpoints with rate limiting middleware + publicRouter := router.PathPrefix("/users/invites").Subrouter() + publicRouter.Use(publicInviteRateLimiter.Middleware) + + // Public endpoints (no auth required, protected by token and rate limited) + publicRouter.HandleFunc("/{token}", h.getInviteInfo).Methods("GET", "OPTIONS") + publicRouter.HandleFunc("/{token}/accept", h.acceptInvite).Methods("POST", "OPTIONS") +} + +// listInvites handles GET /api/users/invites +func (h *invitesHandler) listInvites(w http.ResponseWriter, r *http.Request) { + + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + invites, err := h.accountManager.ListUserInvites(r.Context(), userAuth.AccountId, userAuth.UserId) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + resp := make([]api.UserInvite, 0, len(invites)) + for _, invite := range invites { + resp = append(resp, toUserInviteResponse(invite)) + } + + util.WriteJSONObject(r.Context(), w, resp) +} + +// createInvite handles POST /api/users/invites +func (h *invitesHandler) createInvite(w http.ResponseWriter, r *http.Request) { + + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + var req api.UserInviteCreateRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + util.WriteErrorResponse("couldn't parse JSON request", http.StatusBadRequest, w) + return + } + + invite := &types.UserInfo{ + Email: req.Email, + Name: req.Name, + Role: req.Role, + AutoGroups: req.AutoGroups, + } + + expiresIn := 0 + if req.ExpiresIn != nil { + expiresIn = *req.ExpiresIn + } + + result, err := h.accountManager.CreateUserInvite(r.Context(), userAuth.AccountId, userAuth.UserId, invite, expiresIn) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + result.InviteCreatedAt = time.Now().UTC() + resp := toUserInviteResponse(result) + util.WriteJSONObject(r.Context(), w, &resp) +} + +// getInviteInfo handles GET /api/users/invites/{token} +func (h *invitesHandler) getInviteInfo(w http.ResponseWriter, r *http.Request) { + + vars := mux.Vars(r) + token := vars["token"] + if token == "" { + util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "token is required"), w) + return + } + + info, err := h.accountManager.GetUserInviteInfo(r.Context(), token) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + expiresAt := info.ExpiresAt.UTC() + util.WriteJSONObject(r.Context(), w, &api.UserInviteInfo{ + Email: info.Email, + Name: info.Name, + ExpiresAt: expiresAt, + Valid: info.Valid, + InvitedBy: info.InvitedBy, + }) +} + +// acceptInvite handles POST /api/users/invites/{token}/accept +func (h *invitesHandler) acceptInvite(w http.ResponseWriter, r *http.Request) { + + vars := mux.Vars(r) + token := vars["token"] + if token == "" { + util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "token is required"), w) + return + } + + var req api.UserInviteAcceptRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + util.WriteErrorResponse("couldn't parse JSON request", http.StatusBadRequest, w) + return + } + + err := h.accountManager.AcceptUserInvite(r.Context(), token, req.Password) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + util.WriteJSONObject(r.Context(), w, &api.UserInviteAcceptResponse{Success: true}) +} + +// regenerateInvite handles POST /api/users/invites/{inviteId}/regenerate +func (h *invitesHandler) regenerateInvite(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + util.WriteErrorResponse("wrong HTTP method", http.StatusMethodNotAllowed, w) + return + } + + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + vars := mux.Vars(r) + inviteID := vars["inviteId"] + if inviteID == "" { + util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "invite ID is required"), w) + return + } + + var req api.UserInviteRegenerateRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + // Allow empty body (io.EOF) - expiresIn is optional + if !errors.Is(err, io.EOF) { + util.WriteErrorResponse("couldn't parse JSON request", http.StatusBadRequest, w) + return + } + } + + expiresIn := 0 + if req.ExpiresIn != nil { + expiresIn = *req.ExpiresIn + } + + result, err := h.accountManager.RegenerateUserInvite(r.Context(), userAuth.AccountId, userAuth.UserId, inviteID, expiresIn) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + expiresAt := result.InviteExpiresAt.UTC() + util.WriteJSONObject(r.Context(), w, &api.UserInviteRegenerateResponse{ + InviteToken: result.InviteToken, + InviteExpiresAt: expiresAt, + }) +} + +// deleteInvite handles DELETE /api/users/invites/{inviteId} +func (h *invitesHandler) deleteInvite(w http.ResponseWriter, r *http.Request) { + + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + vars := mux.Vars(r) + inviteID := vars["inviteId"] + if inviteID == "" { + util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "invite ID is required"), w) + return + } + + err = h.accountManager.DeleteUserInvite(r.Context(), userAuth.AccountId, userAuth.UserId, inviteID) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + util.WriteJSONObject(r.Context(), w, util.EmptyObject{}) +} diff --git a/management/server/http/handlers/users/invites_handler_test.go b/management/server/http/handlers/users/invites_handler_test.go new file mode 100644 index 000000000..80826b9d4 --- /dev/null +++ b/management/server/http/handlers/users/invites_handler_test.go @@ -0,0 +1,642 @@ +package users + +import ( + "bytes" + "context" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/gorilla/mux" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + nbcontext "github.com/netbirdio/netbird/management/server/context" + "github.com/netbirdio/netbird/management/server/mock_server" + "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/shared/auth" + "github.com/netbirdio/netbird/shared/management/http/api" + "github.com/netbirdio/netbird/shared/management/status" +) + +const ( + testAccountID = "test-account-id" + testUserID = "test-user-id" + testInviteID = "test-invite-id" + testInviteToken = "nbi_testtoken123456789012345678" + testEmail = "invite@example.com" + testName = "Test User" +) + +func setupInvitesTestHandler(am *mock_server.MockAccountManager) *invitesHandler { + return &invitesHandler{ + accountManager: am, + } +} + +func TestListInvites(t *testing.T) { + now := time.Now().UTC() + testInvites := []*types.UserInvite{ + { + UserInfo: &types.UserInfo{ + ID: "invite-1", + Email: "user1@example.com", + Name: "User One", + Role: "user", + AutoGroups: []string{"group-1"}, + }, + InviteExpiresAt: now.Add(24 * time.Hour), + InviteCreatedAt: now, + }, + { + UserInfo: &types.UserInfo{ + ID: "invite-2", + Email: "user2@example.com", + Name: "User Two", + Role: "admin", + AutoGroups: nil, + }, + InviteExpiresAt: now.Add(-1 * time.Hour), // Expired + InviteCreatedAt: now.Add(-48 * time.Hour), + }, + } + + tt := []struct { + name string + expectedStatus int + mockFunc func(ctx context.Context, accountID, initiatorUserID string) ([]*types.UserInvite, error) + expectedCount int + }{ + { + name: "successful list", + expectedStatus: http.StatusOK, + mockFunc: func(ctx context.Context, accountID, initiatorUserID string) ([]*types.UserInvite, error) { + return testInvites, nil + }, + expectedCount: 2, + }, + { + name: "empty list", + expectedStatus: http.StatusOK, + mockFunc: func(ctx context.Context, accountID, initiatorUserID string) ([]*types.UserInvite, error) { + return []*types.UserInvite{}, nil + }, + expectedCount: 0, + }, + { + name: "permission denied", + expectedStatus: http.StatusForbidden, + mockFunc: func(ctx context.Context, accountID, initiatorUserID string) ([]*types.UserInvite, error) { + return nil, status.NewPermissionDeniedError() + }, + expectedCount: 0, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + am := &mock_server.MockAccountManager{ + ListUserInvitesFunc: tc.mockFunc, + } + handler := setupInvitesTestHandler(am) + + req := httptest.NewRequest(http.MethodGet, "/api/users/invites", nil) + req = nbcontext.SetUserAuthInRequest(req, auth.UserAuth{ + UserId: testUserID, + AccountId: testAccountID, + }) + + rr := httptest.NewRecorder() + handler.listInvites(rr, req) + + assert.Equal(t, tc.expectedStatus, rr.Code) + + if tc.expectedStatus == http.StatusOK { + var resp []api.UserInvite + err := json.NewDecoder(rr.Body).Decode(&resp) + require.NoError(t, err) + assert.Len(t, resp, tc.expectedCount) + } + }) + } +} + +func TestCreateInvite(t *testing.T) { + now := time.Now().UTC() + expiresAt := now.Add(72 * time.Hour) + + tt := []struct { + name string + requestBody string + expectedStatus int + mockFunc func(ctx context.Context, accountID, initiatorUserID string, invite *types.UserInfo, expiresIn int) (*types.UserInvite, error) + }{ + { + name: "successful create", + requestBody: `{"email":"test@example.com","name":"Test User","role":"user","auto_groups":["group-1"]}`, + expectedStatus: http.StatusOK, + mockFunc: func(ctx context.Context, accountID, initiatorUserID string, invite *types.UserInfo, expiresIn int) (*types.UserInvite, error) { + return &types.UserInvite{ + UserInfo: &types.UserInfo{ + ID: testInviteID, + Email: invite.Email, + Name: invite.Name, + Role: invite.Role, + AutoGroups: invite.AutoGroups, + Status: string(types.UserStatusInvited), + }, + InviteToken: testInviteToken, + InviteExpiresAt: expiresAt, + }, nil + }, + }, + { + name: "successful create with custom expiration", + requestBody: `{"email":"test@example.com","name":"Test User","role":"admin","auto_groups":[],"expires_in":3600}`, + expectedStatus: http.StatusOK, + mockFunc: func(ctx context.Context, accountID, initiatorUserID string, invite *types.UserInfo, expiresIn int) (*types.UserInvite, error) { + assert.Equal(t, 3600, expiresIn) + return &types.UserInvite{ + UserInfo: &types.UserInfo{ + ID: testInviteID, + Email: invite.Email, + Name: invite.Name, + Role: invite.Role, + AutoGroups: []string{}, + Status: string(types.UserStatusInvited), + }, + InviteToken: testInviteToken, + InviteExpiresAt: expiresAt, + }, nil + }, + }, + { + name: "user already exists", + requestBody: `{"email":"existing@example.com","name":"Existing User","role":"user","auto_groups":[]}`, + expectedStatus: http.StatusConflict, + mockFunc: func(ctx context.Context, accountID, initiatorUserID string, invite *types.UserInfo, expiresIn int) (*types.UserInvite, error) { + return nil, status.Errorf(status.UserAlreadyExists, "user with this email already exists") + }, + }, + { + name: "invite already exists", + requestBody: `{"email":"invited@example.com","name":"Invited User","role":"user","auto_groups":[]}`, + expectedStatus: http.StatusConflict, + mockFunc: func(ctx context.Context, accountID, initiatorUserID string, invite *types.UserInfo, expiresIn int) (*types.UserInvite, error) { + return nil, status.Errorf(status.AlreadyExists, "invite already exists for this email") + }, + }, + { + name: "permission denied", + requestBody: `{"email":"test@example.com","name":"Test User","role":"user","auto_groups":[]}`, + expectedStatus: http.StatusForbidden, + mockFunc: func(ctx context.Context, accountID, initiatorUserID string, invite *types.UserInfo, expiresIn int) (*types.UserInvite, error) { + return nil, status.NewPermissionDeniedError() + }, + }, + { + name: "embedded IDP not enabled", + requestBody: `{"email":"test@example.com","name":"Test User","role":"user","auto_groups":[]}`, + expectedStatus: http.StatusPreconditionFailed, + mockFunc: func(ctx context.Context, accountID, initiatorUserID string, invite *types.UserInfo, expiresIn int) (*types.UserInvite, error) { + return nil, status.Errorf(status.PreconditionFailed, "invite links are only available with embedded identity provider") + }, + }, + { + name: "invalid JSON", + requestBody: `{invalid json}`, + expectedStatus: http.StatusBadRequest, + mockFunc: nil, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + am := &mock_server.MockAccountManager{ + CreateUserInviteFunc: tc.mockFunc, + } + handler := setupInvitesTestHandler(am) + + req := httptest.NewRequest(http.MethodPost, "/api/users/invites", bytes.NewBufferString(tc.requestBody)) + req = nbcontext.SetUserAuthInRequest(req, auth.UserAuth{ + UserId: testUserID, + AccountId: testAccountID, + }) + + rr := httptest.NewRecorder() + handler.createInvite(rr, req) + + assert.Equal(t, tc.expectedStatus, rr.Code) + + if tc.expectedStatus == http.StatusOK { + var resp api.UserInvite + err := json.NewDecoder(rr.Body).Decode(&resp) + require.NoError(t, err) + assert.Equal(t, testInviteID, resp.Id) + assert.NotNil(t, resp.InviteToken) + assert.NotEmpty(t, *resp.InviteToken) + } + }) + } +} + +func TestGetInviteInfo(t *testing.T) { + now := time.Now().UTC() + + tt := []struct { + name string + token string + expectedStatus int + mockFunc func(ctx context.Context, token string) (*types.UserInviteInfo, error) + }{ + { + name: "successful get valid invite", + token: testInviteToken, + expectedStatus: http.StatusOK, + mockFunc: func(ctx context.Context, token string) (*types.UserInviteInfo, error) { + return &types.UserInviteInfo{ + Email: testEmail, + Name: testName, + ExpiresAt: now.Add(24 * time.Hour), + Valid: true, + InvitedBy: "Admin User", + }, nil + }, + }, + { + name: "successful get expired invite", + token: testInviteToken, + expectedStatus: http.StatusOK, + mockFunc: func(ctx context.Context, token string) (*types.UserInviteInfo, error) { + return &types.UserInviteInfo{ + Email: testEmail, + Name: testName, + ExpiresAt: now.Add(-24 * time.Hour), + Valid: false, + InvitedBy: "Admin User", + }, nil + }, + }, + { + name: "invite not found", + token: "nbi_invalidtoken1234567890123456", + expectedStatus: http.StatusNotFound, + mockFunc: func(ctx context.Context, token string) (*types.UserInviteInfo, error) { + return nil, status.Errorf(status.NotFound, "invite not found") + }, + }, + { + name: "invalid token format", + token: "invalid", + expectedStatus: http.StatusUnprocessableEntity, + mockFunc: func(ctx context.Context, token string) (*types.UserInviteInfo, error) { + return nil, status.Errorf(status.InvalidArgument, "invalid invite token") + }, + }, + { + name: "missing token", + token: "", + expectedStatus: http.StatusUnprocessableEntity, + mockFunc: nil, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + am := &mock_server.MockAccountManager{ + GetUserInviteInfoFunc: tc.mockFunc, + } + handler := setupInvitesTestHandler(am) + + req := httptest.NewRequest(http.MethodGet, "/api/users/invites/"+tc.token, nil) + if tc.token != "" { + req = mux.SetURLVars(req, map[string]string{"token": tc.token}) + } + + rr := httptest.NewRecorder() + handler.getInviteInfo(rr, req) + + assert.Equal(t, tc.expectedStatus, rr.Code) + + if tc.expectedStatus == http.StatusOK { + var resp api.UserInviteInfo + err := json.NewDecoder(rr.Body).Decode(&resp) + require.NoError(t, err) + assert.Equal(t, testEmail, resp.Email) + assert.Equal(t, testName, resp.Name) + } + }) + } +} + +func TestAcceptInvite(t *testing.T) { + tt := []struct { + name string + token string + requestBody string + expectedStatus int + mockFunc func(ctx context.Context, token, password string) error + }{ + { + name: "successful accept", + token: testInviteToken, + requestBody: `{"password":"SecurePass123!"}`, + expectedStatus: http.StatusOK, + mockFunc: func(ctx context.Context, token, password string) error { + return nil + }, + }, + { + name: "invite not found", + token: "nbi_invalidtoken1234567890123456", + requestBody: `{"password":"SecurePass123!"}`, + expectedStatus: http.StatusNotFound, + mockFunc: func(ctx context.Context, token, password string) error { + return status.Errorf(status.NotFound, "invite not found") + }, + }, + { + name: "invite expired", + token: testInviteToken, + requestBody: `{"password":"SecurePass123!"}`, + expectedStatus: http.StatusUnprocessableEntity, + mockFunc: func(ctx context.Context, token, password string) error { + return status.Errorf(status.InvalidArgument, "invite has expired") + }, + }, + { + name: "embedded IDP not enabled", + token: testInviteToken, + requestBody: `{"password":"SecurePass123!"}`, + expectedStatus: http.StatusPreconditionFailed, + mockFunc: func(ctx context.Context, token, password string) error { + return status.Errorf(status.PreconditionFailed, "invite links are only available with embedded identity provider") + }, + }, + { + name: "missing token", + token: "", + requestBody: `{"password":"SecurePass123!"}`, + expectedStatus: http.StatusUnprocessableEntity, + mockFunc: nil, + }, + { + name: "invalid JSON", + token: testInviteToken, + requestBody: `{invalid}`, + expectedStatus: http.StatusBadRequest, + mockFunc: nil, + }, + { + name: "password too short", + token: testInviteToken, + requestBody: `{"password":"Short1!"}`, + expectedStatus: http.StatusUnprocessableEntity, + mockFunc: func(ctx context.Context, token, password string) error { + return status.Errorf(status.InvalidArgument, "password must be at least 8 characters long") + }, + }, + { + name: "password missing digit", + token: testInviteToken, + requestBody: `{"password":"NoDigitPass!"}`, + expectedStatus: http.StatusUnprocessableEntity, + mockFunc: func(ctx context.Context, token, password string) error { + return status.Errorf(status.InvalidArgument, "password must contain at least one digit") + }, + }, + { + name: "password missing uppercase", + token: testInviteToken, + requestBody: `{"password":"nouppercase1!"}`, + expectedStatus: http.StatusUnprocessableEntity, + mockFunc: func(ctx context.Context, token, password string) error { + return status.Errorf(status.InvalidArgument, "password must contain at least one uppercase letter") + }, + }, + { + name: "password missing special character", + token: testInviteToken, + requestBody: `{"password":"NoSpecial123"}`, + expectedStatus: http.StatusUnprocessableEntity, + mockFunc: func(ctx context.Context, token, password string) error { + return status.Errorf(status.InvalidArgument, "password must contain at least one special character") + }, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + am := &mock_server.MockAccountManager{ + AcceptUserInviteFunc: tc.mockFunc, + } + handler := setupInvitesTestHandler(am) + + req := httptest.NewRequest(http.MethodPost, "/api/users/invites/"+tc.token+"/accept", bytes.NewBufferString(tc.requestBody)) + if tc.token != "" { + req = mux.SetURLVars(req, map[string]string{"token": tc.token}) + } + + rr := httptest.NewRecorder() + handler.acceptInvite(rr, req) + + assert.Equal(t, tc.expectedStatus, rr.Code) + + if tc.expectedStatus == http.StatusOK { + var resp api.UserInviteAcceptResponse + err := json.NewDecoder(rr.Body).Decode(&resp) + require.NoError(t, err) + assert.True(t, resp.Success) + } + }) + } +} + +func TestRegenerateInvite(t *testing.T) { + now := time.Now().UTC() + expiresAt := now.Add(72 * time.Hour) + + tt := []struct { + name string + inviteID string + requestBody string + expectedStatus int + mockFunc func(ctx context.Context, accountID, initiatorUserID, inviteID string, expiresIn int) (*types.UserInvite, error) + }{ + { + name: "successful regenerate with empty body", + inviteID: testInviteID, + requestBody: "", + expectedStatus: http.StatusOK, + mockFunc: func(ctx context.Context, accountID, initiatorUserID, inviteID string, expiresIn int) (*types.UserInvite, error) { + assert.Equal(t, 0, expiresIn) + return &types.UserInvite{ + UserInfo: &types.UserInfo{ + ID: inviteID, + Email: testEmail, + }, + InviteToken: "nbi_newtoken12345678901234567890", + InviteExpiresAt: expiresAt, + }, nil + }, + }, + { + name: "successful regenerate with custom expiration", + inviteID: testInviteID, + requestBody: `{"expires_in":7200}`, + expectedStatus: http.StatusOK, + mockFunc: func(ctx context.Context, accountID, initiatorUserID, inviteID string, expiresIn int) (*types.UserInvite, error) { + assert.Equal(t, 7200, expiresIn) + return &types.UserInvite{ + UserInfo: &types.UserInfo{ + ID: inviteID, + Email: testEmail, + }, + InviteToken: "nbi_newtoken12345678901234567890", + InviteExpiresAt: expiresAt, + }, nil + }, + }, + { + name: "invite not found", + inviteID: "non-existent-invite", + requestBody: "", + expectedStatus: http.StatusNotFound, + mockFunc: func(ctx context.Context, accountID, initiatorUserID, inviteID string, expiresIn int) (*types.UserInvite, error) { + return nil, status.Errorf(status.NotFound, "invite not found") + }, + }, + { + name: "permission denied", + inviteID: testInviteID, + requestBody: "", + expectedStatus: http.StatusForbidden, + mockFunc: func(ctx context.Context, accountID, initiatorUserID, inviteID string, expiresIn int) (*types.UserInvite, error) { + return nil, status.NewPermissionDeniedError() + }, + }, + { + name: "missing invite ID", + inviteID: "", + requestBody: "", + expectedStatus: http.StatusUnprocessableEntity, + mockFunc: nil, + }, + { + name: "invalid JSON should return error", + inviteID: testInviteID, + requestBody: `{invalid json}`, + expectedStatus: http.StatusBadRequest, + mockFunc: nil, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + am := &mock_server.MockAccountManager{ + RegenerateUserInviteFunc: tc.mockFunc, + } + handler := setupInvitesTestHandler(am) + + var body io.Reader + if tc.requestBody != "" { + body = bytes.NewBufferString(tc.requestBody) + } + + req := httptest.NewRequest(http.MethodPost, "/api/users/invites/"+tc.inviteID+"/regenerate", body) + req = nbcontext.SetUserAuthInRequest(req, auth.UserAuth{ + UserId: testUserID, + AccountId: testAccountID, + }) + if tc.inviteID != "" { + req = mux.SetURLVars(req, map[string]string{"inviteId": tc.inviteID}) + } + + rr := httptest.NewRecorder() + handler.regenerateInvite(rr, req) + + assert.Equal(t, tc.expectedStatus, rr.Code) + + if tc.expectedStatus == http.StatusOK { + var resp api.UserInviteRegenerateResponse + err := json.NewDecoder(rr.Body).Decode(&resp) + require.NoError(t, err) + assert.NotEmpty(t, resp.InviteToken) + } + }) + } +} + +func TestDeleteInvite(t *testing.T) { + tt := []struct { + name string + inviteID string + expectedStatus int + mockFunc func(ctx context.Context, accountID, initiatorUserID, inviteID string) error + }{ + { + name: "successful delete", + inviteID: testInviteID, + expectedStatus: http.StatusOK, + mockFunc: func(ctx context.Context, accountID, initiatorUserID, inviteID string) error { + return nil + }, + }, + { + name: "invite not found", + inviteID: "non-existent-invite", + expectedStatus: http.StatusNotFound, + mockFunc: func(ctx context.Context, accountID, initiatorUserID, inviteID string) error { + return status.Errorf(status.NotFound, "invite not found") + }, + }, + { + name: "permission denied", + inviteID: testInviteID, + expectedStatus: http.StatusForbidden, + mockFunc: func(ctx context.Context, accountID, initiatorUserID, inviteID string) error { + return status.NewPermissionDeniedError() + }, + }, + { + name: "embedded IDP not enabled", + inviteID: testInviteID, + expectedStatus: http.StatusPreconditionFailed, + mockFunc: func(ctx context.Context, accountID, initiatorUserID, inviteID string) error { + return status.Errorf(status.PreconditionFailed, "invite links are only available with embedded identity provider") + }, + }, + { + name: "missing invite ID", + inviteID: "", + expectedStatus: http.StatusUnprocessableEntity, + mockFunc: nil, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + am := &mock_server.MockAccountManager{ + DeleteUserInviteFunc: tc.mockFunc, + } + handler := setupInvitesTestHandler(am) + + req := httptest.NewRequest(http.MethodDelete, "/api/users/invites/"+tc.inviteID, nil) + req = nbcontext.SetUserAuthInRequest(req, auth.UserAuth{ + UserId: testUserID, + AccountId: testAccountID, + }) + if tc.inviteID != "" { + req = mux.SetURLVars(req, map[string]string{"inviteId": tc.inviteID}) + } + + rr := httptest.NewRecorder() + handler.deleteInvite(rr, req) + + assert.Equal(t, tc.expectedStatus, rr.Code) + }) + } +} diff --git a/management/server/http/handlers/users/users_handler.go b/management/server/http/handlers/users/users_handler.go index 7669d7404..40ad585d2 100644 --- a/management/server/http/handlers/users/users_handler.go +++ b/management/server/http/handlers/users/users_handler.go @@ -33,6 +33,7 @@ func AddEndpoints(accountManager account.Manager, router *mux.Router) { router.HandleFunc("/users/{userId}/invite", userHandler.inviteUser).Methods("POST", "OPTIONS") router.HandleFunc("/users/{userId}/approve", userHandler.approveUser).Methods("POST", "OPTIONS") router.HandleFunc("/users/{userId}/reject", userHandler.rejectUser).Methods("DELETE", "OPTIONS") + router.HandleFunc("/users/{userId}/password", userHandler.changePassword).Methods("PUT", "OPTIONS") addUsersTokensEndpoint(accountManager, router) } @@ -410,3 +411,46 @@ func (h *handler) rejectUser(w http.ResponseWriter, r *http.Request) { util.WriteJSONObject(r.Context(), w, util.EmptyObject{}) } + +// passwordChangeRequest represents the request body for password change +type passwordChangeRequest struct { + OldPassword string `json:"old_password"` + NewPassword string `json:"new_password"` +} + +// changePassword is a PUT request to change user's password. +// Only available when embedded IDP is enabled. +// Users can only change their own password. +func (h *handler) changePassword(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPut { + util.WriteErrorResponse("wrong HTTP method", http.StatusMethodNotAllowed, w) + return + } + + vars := mux.Vars(r) + targetUserID := vars["userId"] + if len(targetUserID) == 0 { + util.WriteErrorResponse("invalid user ID", http.StatusBadRequest, w) + return + } + + userAuth, err := nbcontext.GetUserAuthFromContext(r.Context()) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + var req passwordChangeRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + util.WriteErrorResponse("couldn't parse JSON request", http.StatusBadRequest, w) + return + } + + err = h.accountManager.UpdateUserPassword(r.Context(), userAuth.AccountId, userAuth.UserId, targetUserID, req.OldPassword, req.NewPassword) + if err != nil { + util.WriteError(r.Context(), err, w) + return + } + + util.WriteJSONObject(r.Context(), w, util.EmptyObject{}) +} diff --git a/management/server/http/handlers/users/users_handler_test.go b/management/server/http/handlers/users/users_handler_test.go index 37f0a6c1d..aa77dd843 100644 --- a/management/server/http/handlers/users/users_handler_test.go +++ b/management/server/http/handlers/users/users_handler_test.go @@ -856,3 +856,118 @@ func TestRejectUserEndpoint(t *testing.T) { }) } } + +func TestChangePasswordEndpoint(t *testing.T) { + tt := []struct { + name string + expectedStatus int + requestBody string + targetUserID string + currentUserID string + mockError error + expectMockNotCalled bool + }{ + { + name: "successful password change", + expectedStatus: http.StatusOK, + requestBody: `{"old_password": "OldPass123!", "new_password": "NewPass456!"}`, + targetUserID: existingUserID, + currentUserID: existingUserID, + mockError: nil, + }, + { + name: "missing old password", + expectedStatus: http.StatusUnprocessableEntity, + requestBody: `{"new_password": "NewPass456!"}`, + targetUserID: existingUserID, + currentUserID: existingUserID, + mockError: status.Errorf(status.InvalidArgument, "old password is required"), + }, + { + name: "missing new password", + expectedStatus: http.StatusUnprocessableEntity, + requestBody: `{"old_password": "OldPass123!"}`, + targetUserID: existingUserID, + currentUserID: existingUserID, + mockError: status.Errorf(status.InvalidArgument, "new password is required"), + }, + { + name: "wrong old password", + expectedStatus: http.StatusUnprocessableEntity, + requestBody: `{"old_password": "WrongPass!", "new_password": "NewPass456!"}`, + targetUserID: existingUserID, + currentUserID: existingUserID, + mockError: status.Errorf(status.InvalidArgument, "invalid password"), + }, + { + name: "embedded IDP not enabled", + expectedStatus: http.StatusPreconditionFailed, + requestBody: `{"old_password": "OldPass123!", "new_password": "NewPass456!"}`, + targetUserID: existingUserID, + currentUserID: existingUserID, + mockError: status.Errorf(status.PreconditionFailed, "password change is only available with embedded identity provider"), + }, + { + name: "invalid JSON request", + expectedStatus: http.StatusBadRequest, + requestBody: `{invalid json}`, + targetUserID: existingUserID, + currentUserID: existingUserID, + expectMockNotCalled: true, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + mockCalled := false + am := &mock_server.MockAccountManager{} + am.UpdateUserPasswordFunc = func(ctx context.Context, accountID, currentUserID, targetUserID string, oldPassword, newPassword string) error { + mockCalled = true + return tc.mockError + } + + handler := newHandler(am) + router := mux.NewRouter() + router.HandleFunc("/users/{userId}/password", handler.changePassword).Methods("PUT") + + reqPath := "/users/" + tc.targetUserID + "/password" + req, err := http.NewRequest("PUT", reqPath, bytes.NewBufferString(tc.requestBody)) + require.NoError(t, err) + + userAuth := auth.UserAuth{ + AccountId: existingAccountID, + UserId: tc.currentUserID, + } + ctx := nbcontext.SetUserAuthInContext(req.Context(), userAuth) + req = req.WithContext(ctx) + + rr := httptest.NewRecorder() + router.ServeHTTP(rr, req) + + assert.Equal(t, tc.expectedStatus, rr.Code) + + if tc.expectMockNotCalled { + assert.False(t, mockCalled, "mock should not have been called") + } + }) + } +} + +func TestChangePasswordEndpoint_WrongMethod(t *testing.T) { + am := &mock_server.MockAccountManager{} + handler := newHandler(am) + + req, err := http.NewRequest("POST", "/users/test-user/password", bytes.NewBufferString(`{}`)) + require.NoError(t, err) + + userAuth := auth.UserAuth{ + AccountId: existingAccountID, + UserId: existingUserID, + } + req = nbcontext.SetUserAuthInRequest(req, userAuth) + + rr := httptest.NewRecorder() + handler.changePassword(rr, req) + + assert.Equal(t, http.StatusMethodNotAllowed, rr.Code) +} diff --git a/management/server/http/middleware/rate_limiter.go b/management/server/http/middleware/rate_limiter.go index a6266d4f3..936b34319 100644 --- a/management/server/http/middleware/rate_limiter.go +++ b/management/server/http/middleware/rate_limiter.go @@ -2,10 +2,14 @@ package middleware import ( "context" + "net" + "net/http" "sync" "time" "golang.org/x/time/rate" + + "github.com/netbirdio/netbird/shared/management/http/util" ) // RateLimiterConfig holds configuration for the API rate limiter @@ -144,3 +148,25 @@ func (rl *APIRateLimiter) Reset(key string) { defer rl.mu.Unlock() delete(rl.limiters, key) } + +// Middleware returns an HTTP middleware that rate limits requests by client IP. +// Returns 429 Too Many Requests if the rate limit is exceeded. +func (rl *APIRateLimiter) Middleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + clientIP := getClientIP(r) + if !rl.Allow(clientIP) { + util.WriteErrorResponse("rate limit exceeded, please try again later", http.StatusTooManyRequests, w) + return + } + next.ServeHTTP(w, r) + }) +} + +// getClientIP extracts the client IP address from the request. +func getClientIP(r *http.Request) string { + ip, _, err := net.SplitHostPort(r.RemoteAddr) + if err != nil { + return r.RemoteAddr + } + return ip +} diff --git a/management/server/http/middleware/rate_limiter_test.go b/management/server/http/middleware/rate_limiter_test.go new file mode 100644 index 000000000..68f804e57 --- /dev/null +++ b/management/server/http/middleware/rate_limiter_test.go @@ -0,0 +1,158 @@ +package middleware + +import ( + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestAPIRateLimiter_Allow(t *testing.T) { + rl := NewAPIRateLimiter(&RateLimiterConfig{ + RequestsPerMinute: 60, // 1 per second + Burst: 2, + CleanupInterval: time.Minute, + LimiterTTL: time.Minute, + }) + defer rl.Stop() + + // First two requests should be allowed (burst) + assert.True(t, rl.Allow("test-key")) + assert.True(t, rl.Allow("test-key")) + + // Third request should be denied (exceeded burst) + assert.False(t, rl.Allow("test-key")) + + // Different key should be allowed + assert.True(t, rl.Allow("different-key")) +} + +func TestAPIRateLimiter_Middleware(t *testing.T) { + rl := NewAPIRateLimiter(&RateLimiterConfig{ + RequestsPerMinute: 60, // 1 per second + Burst: 2, + CleanupInterval: time.Minute, + LimiterTTL: time.Minute, + }) + defer rl.Stop() + + // Create a simple handler that returns 200 OK + nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + + // Wrap with rate limiter middleware + handler := rl.Middleware(nextHandler) + + // First two requests should pass (burst) + for i := 0; i < 2; i++ { + req := httptest.NewRequest(http.MethodGet, "/test", nil) + req.RemoteAddr = "192.168.1.1:12345" + rr := httptest.NewRecorder() + handler.ServeHTTP(rr, req) + assert.Equal(t, http.StatusOK, rr.Code, "request %d should be allowed", i+1) + } + + // Third request should be rate limited + req := httptest.NewRequest(http.MethodGet, "/test", nil) + req.RemoteAddr = "192.168.1.1:12345" + rr := httptest.NewRecorder() + handler.ServeHTTP(rr, req) + assert.Equal(t, http.StatusTooManyRequests, rr.Code) +} + +func TestAPIRateLimiter_Middleware_DifferentIPs(t *testing.T) { + rl := NewAPIRateLimiter(&RateLimiterConfig{ + RequestsPerMinute: 60, + Burst: 1, + CleanupInterval: time.Minute, + LimiterTTL: time.Minute, + }) + defer rl.Stop() + + nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + + handler := rl.Middleware(nextHandler) + + // Request from first IP + req1 := httptest.NewRequest(http.MethodGet, "/test", nil) + req1.RemoteAddr = "192.168.1.1:12345" + rr1 := httptest.NewRecorder() + handler.ServeHTTP(rr1, req1) + assert.Equal(t, http.StatusOK, rr1.Code) + + // Second request from first IP should be rate limited + req2 := httptest.NewRequest(http.MethodGet, "/test", nil) + req2.RemoteAddr = "192.168.1.1:12345" + rr2 := httptest.NewRecorder() + handler.ServeHTTP(rr2, req2) + assert.Equal(t, http.StatusTooManyRequests, rr2.Code) + + // Request from different IP should be allowed + req3 := httptest.NewRequest(http.MethodGet, "/test", nil) + req3.RemoteAddr = "192.168.1.2:12345" + rr3 := httptest.NewRecorder() + handler.ServeHTTP(rr3, req3) + assert.Equal(t, http.StatusOK, rr3.Code) +} + +func TestGetClientIP(t *testing.T) { + tests := []struct { + name string + remoteAddr string + expected string + }{ + { + name: "remote addr with port", + remoteAddr: "192.168.1.1:12345", + expected: "192.168.1.1", + }, + { + name: "remote addr without port", + remoteAddr: "192.168.1.1", + expected: "192.168.1.1", + }, + { + name: "IPv6 with port", + remoteAddr: "[::1]:12345", + expected: "::1", + }, + { + name: "IPv6 without port", + remoteAddr: "::1", + expected: "::1", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + req := httptest.NewRequest(http.MethodGet, "/test", nil) + req.RemoteAddr = tc.remoteAddr + assert.Equal(t, tc.expected, getClientIP(req)) + }) + } +} + +func TestAPIRateLimiter_Reset(t *testing.T) { + rl := NewAPIRateLimiter(&RateLimiterConfig{ + RequestsPerMinute: 60, + Burst: 1, + CleanupInterval: time.Minute, + LimiterTTL: time.Minute, + }) + defer rl.Stop() + + // Use up the burst + assert.True(t, rl.Allow("test-key")) + assert.False(t, rl.Allow("test-key")) + + // Reset the limiter + rl.Reset("test-key") + + // Should be allowed again + assert.True(t, rl.Allow("test-key")) +} diff --git a/management/server/http/testing/testing_tools/channel/channel.go b/management/server/http/testing/testing_tools/channel/channel.go index 8c8f1a7b2..9339c3541 100644 --- a/management/server/http/testing/testing_tools/channel/channel.go +++ b/management/server/http/testing/testing_tools/channel/channel.go @@ -10,6 +10,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/netbirdio/management-integrations/integrations" + zonesManager "github.com/netbirdio/netbird/management/internals/modules/zones/manager" recordsManager "github.com/netbirdio/netbird/management/internals/modules/zones/records/manager" "github.com/netbirdio/netbird/management/internals/server/config" @@ -20,6 +21,7 @@ import ( "github.com/netbirdio/netbird/management/internals/modules/peers" ephemeral_manager "github.com/netbirdio/netbird/management/internals/modules/peers/ephemeral/manager" "github.com/netbirdio/netbird/management/server/integrations/port_forwarding" + "github.com/netbirdio/netbird/management/server/job" "github.com/netbirdio/netbird/management/server" "github.com/netbirdio/netbird/management/server/account" @@ -72,11 +74,14 @@ func BuildApiBlackBoxWithDBState(t testing_tools.TB, sqlFile string, expectedPee userManager := users.NewManager(store) permissionsManager := permissions.NewManager(store) settingsManager := settings.NewManager(store, userManager, integrations.NewManager(&activity.InMemoryEventStore{}), permissionsManager) + peersManager := peers.NewManager(store, permissionsManager) + + jobManager := job.NewJobManager(nil, store, peersManager) ctx := context.Background() requestBuffer := server.NewAccountRequestBuffer(ctx, store) - networkMapController := controller.NewController(ctx, store, metrics, peersUpdateManager, requestBuffer, server.MockIntegratedValidator{}, settingsManager, "", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(store, peers.NewManager(store, permissionsManager)), &config.Config{}) - am, err := server.BuildManager(ctx, nil, store, networkMapController, nil, "", &activity.InMemoryEventStore{}, geoMock, false, validatorMock, metrics, proxyController, settingsManager, permissionsManager, false) + networkMapController := controller.NewController(ctx, store, metrics, peersUpdateManager, requestBuffer, server.MockIntegratedValidator{}, settingsManager, "", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(store, peersManager), &config.Config{}) + am, err := server.BuildManager(ctx, nil, store, networkMapController, jobManager, nil, "", &activity.InMemoryEventStore{}, geoMock, false, validatorMock, metrics, proxyController, settingsManager, permissionsManager, false) if err != nil { t.Fatalf("Failed to create manager: %v", err) } @@ -94,7 +99,6 @@ func BuildApiBlackBoxWithDBState(t testing_tools.TB, sqlFile string, expectedPee resourcesManagerMock := resources.NewManagerMock() routersManagerMock := routers.NewManagerMock() groupsManagerMock := groups.NewManagerMock() - peersManager := peers.NewManager(store, permissionsManager) customZonesManager := zonesManager.NewManager(store, am, permissionsManager, "") zoneRecordsManager := recordsManager.NewManager(store, am, permissionsManager) diff --git a/management/server/identity_provider_test.go b/management/server/identity_provider_test.go index 78dcbeb74..9fce6b9c0 100644 --- a/management/server/identity_provider_test.go +++ b/management/server/identity_provider_test.go @@ -21,6 +21,7 @@ import ( "github.com/netbirdio/netbird/management/server/activity" "github.com/netbirdio/netbird/management/server/idp" "github.com/netbirdio/netbird/management/server/integrations/port_forwarding" + "github.com/netbirdio/netbird/management/server/job" "github.com/netbirdio/netbird/management/server/permissions" "github.com/netbirdio/netbird/management/server/settings" "github.com/netbirdio/netbird/management/server/store" @@ -80,11 +81,12 @@ func createManagerWithEmbeddedIdP(t testing.TB) (*DefaultAccountManager, *update AnyTimes() permissionsManager := permissions.NewManager(testStore) + peersManager := peers.NewManager(testStore, permissionsManager) updateManager := update_channel.NewPeersUpdateManager(metrics) requestBuffer := NewAccountRequestBuffer(ctx, testStore) - networkMapController := controller.NewController(ctx, testStore, metrics, updateManager, requestBuffer, MockIntegratedValidator{}, settingsMockManager, "netbird.cloud", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(testStore, peers.NewManager(testStore, permissionsManager)), &config.Config{}) - manager, err := BuildManager(ctx, &config.Config{}, testStore, networkMapController, idpManager, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) + networkMapController := controller.NewController(ctx, testStore, metrics, updateManager, requestBuffer, MockIntegratedValidator{}, settingsMockManager, "netbird.cloud", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(testStore, peersManager), &config.Config{}) + manager, err := BuildManager(ctx, &config.Config{}, testStore, networkMapController, job.NewJobManager(nil, testStore, peersManager), idpManager, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) if err != nil { return nil, nil, err } diff --git a/management/server/idp/auth0.go b/management/server/idp/auth0.go index 1eb8434d3..0d4461e89 100644 --- a/management/server/idp/auth0.go +++ b/management/server/idp/auth0.go @@ -135,10 +135,11 @@ func NewAuth0Manager(config Auth0ClientConfig, appMetrics telemetry.AppMetrics) httpTransport := http.DefaultTransport.(*http.Transport).Clone() httpTransport.MaxIdleConns = 5 - httpClient := &http.Client{ - Timeout: 10 * time.Second, + httpClient := &http.Client{ + Timeout: idpTimeout(), Transport: httpTransport, } + helper := JsonParser{} if config.AuthIssuer == "" { diff --git a/management/server/idp/authentik.go b/management/server/idp/authentik.go index 2f87a9bba..0f30cc63d 100644 --- a/management/server/idp/authentik.go +++ b/management/server/idp/authentik.go @@ -48,16 +48,15 @@ type AuthentikCredentials struct { } // NewAuthentikManager creates a new instance of the AuthentikManager. -func NewAuthentikManager(config AuthentikClientConfig, - appMetrics telemetry.AppMetrics) (*AuthentikManager, error) { +func NewAuthentikManager(config AuthentikClientConfig, appMetrics telemetry.AppMetrics) (*AuthentikManager, error) { httpTransport := http.DefaultTransport.(*http.Transport).Clone() httpTransport.MaxIdleConns = 5 httpClient := &http.Client{ - Timeout: 10 * time.Second, + Timeout: idpTimeout(), Transport: httpTransport, } - + helper := JsonParser{} if config.ClientID == "" { diff --git a/management/server/idp/azure.go b/management/server/idp/azure.go index 393a39e3e..e098424b5 100644 --- a/management/server/idp/azure.go +++ b/management/server/idp/azure.go @@ -57,10 +57,11 @@ func NewAzureManager(config AzureClientConfig, appMetrics telemetry.AppMetrics) httpTransport := http.DefaultTransport.(*http.Transport).Clone() httpTransport.MaxIdleConns = 5 - httpClient := &http.Client{ - Timeout: 10 * time.Second, + httpClient := &http.Client{ + Timeout: idpTimeout(), Transport: httpTransport, } + helper := JsonParser{} if config.ClientID == "" { diff --git a/management/server/idp/embedded.go b/management/server/idp/embedded.go index 0e46b506e..db7a91fa3 100644 --- a/management/server/idp/embedded.go +++ b/management/server/idp/embedded.go @@ -20,7 +20,7 @@ const ( staticClientCLI = "netbird-cli" defaultCLIRedirectURL1 = "http://localhost:53000/" defaultCLIRedirectURL2 = "http://localhost:54000/" - defaultScopes = "openid profile email" + defaultScopes = "openid profile email groups" defaultUserIDClaim = "sub" ) @@ -400,7 +400,6 @@ func (m *EmbeddedIdPManager) CreateUserWithPassword(ctx context.Context, email, // InviteUserByID resends an invitation to a user. func (m *EmbeddedIdPManager) InviteUserByID(ctx context.Context, userID string) error { - // TODO: implement return fmt.Errorf("not implemented") } @@ -432,6 +431,33 @@ func (m *EmbeddedIdPManager) DeleteUser(ctx context.Context, userID string) erro return nil } +// UpdateUserPassword updates the password for a user in the embedded IdP. +// It verifies that the current user is changing their own password and +// validates the current password before updating to the new password. +func (m *EmbeddedIdPManager) UpdateUserPassword(ctx context.Context, currentUserID, targetUserID string, oldPassword, newPassword string) error { + // Verify the user is changing their own password + if currentUserID != targetUserID { + return fmt.Errorf("users can only change their own password") + } + + // Verify the new password is different from the old password + if oldPassword == newPassword { + return fmt.Errorf("new password must be different from current password") + } + + err := m.provider.UpdateUserPassword(ctx, targetUserID, oldPassword, newPassword) + if err != nil { + if m.appMetrics != nil { + m.appMetrics.IDPMetrics().CountRequestError() + } + return err + } + + log.WithContext(ctx).Debugf("updated password for user %s in embedded IdP", targetUserID) + + return nil +} + // CreateConnector creates a new identity provider connector in Dex. // Returns the created connector config with the redirect URL populated. func (m *EmbeddedIdPManager) CreateConnector(ctx context.Context, cfg *dex.ConnectorConfig) (*dex.ConnectorConfig, error) { @@ -449,15 +475,8 @@ func (m *EmbeddedIdPManager) ListConnectors(ctx context.Context) ([]*dex.Connect } // UpdateConnector updates an existing identity provider connector. +// Field preservation for partial updates is handled by Provider.UpdateConnector. func (m *EmbeddedIdPManager) UpdateConnector(ctx context.Context, cfg *dex.ConnectorConfig) error { - // Preserve existing secret if not provided in update - if cfg.ClientSecret == "" { - existing, err := m.provider.GetConnector(ctx, cfg.ID) - if err != nil { - return fmt.Errorf("failed to get existing connector: %w", err) - } - cfg.ClientSecret = existing.ClientSecret - } return m.provider.UpdateConnector(ctx, cfg) } diff --git a/management/server/idp/embedded_test.go b/management/server/idp/embedded_test.go index 04e3f0699..d8d3009dd 100644 --- a/management/server/idp/embedded_test.go +++ b/management/server/idp/embedded_test.go @@ -248,6 +248,71 @@ func TestEmbeddedIdPManager_UserIDFormat_MatchesJWT(t *testing.T) { t.Logf(" Connector: %s", connectorID) } +func TestEmbeddedIdPManager_UpdateUserPassword(t *testing.T) { + ctx := context.Background() + + tmpDir, err := os.MkdirTemp("", "embedded-idp-test-*") + require.NoError(t, err) + defer os.RemoveAll(tmpDir) + + config := &EmbeddedIdPConfig{ + Enabled: true, + Issuer: "http://localhost:5556/dex", + Storage: EmbeddedStorageConfig{ + Type: "sqlite3", + Config: EmbeddedStorageTypeConfig{ + File: filepath.Join(tmpDir, "dex.db"), + }, + }, + } + + manager, err := NewEmbeddedIdPManager(ctx, config, nil) + require.NoError(t, err) + defer func() { _ = manager.Stop(ctx) }() + + // Create a user with a known password + email := "password-test@example.com" + name := "Password Test User" + initialPassword := "InitialPass123!" + + userData, err := manager.CreateUserWithPassword(ctx, email, initialPassword, name) + require.NoError(t, err) + require.NotNil(t, userData) + + userID := userData.ID + + t.Run("successful password change", func(t *testing.T) { + newPassword := "NewSecurePass456!" + err := manager.UpdateUserPassword(ctx, userID, userID, initialPassword, newPassword) + require.NoError(t, err) + + // Verify the new password works by changing it again + anotherPassword := "AnotherPass789!" + err = manager.UpdateUserPassword(ctx, userID, userID, newPassword, anotherPassword) + require.NoError(t, err) + }) + + t.Run("wrong old password", func(t *testing.T) { + err := manager.UpdateUserPassword(ctx, userID, userID, "wrongpassword", "NewPass123!") + require.Error(t, err) + assert.Contains(t, err.Error(), "current password is incorrect") + }) + + t.Run("cannot change other user password", func(t *testing.T) { + otherUserID := "other-user-id" + err := manager.UpdateUserPassword(ctx, userID, otherUserID, "oldpass", "newpass") + require.Error(t, err) + assert.Contains(t, err.Error(), "users can only change their own password") + }) + + t.Run("same password rejected", func(t *testing.T) { + samePassword := "SamePass123!" + err := manager.UpdateUserPassword(ctx, userID, userID, samePassword, samePassword) + require.Error(t, err) + assert.Contains(t, err.Error(), "new password must be different") + }) +} + func TestEmbeddedIdPManager_GetLocalKeysLocation(t *testing.T) { ctx := context.Background() diff --git a/management/server/idp/google_workspace.go b/management/server/idp/google_workspace.go index 09ea8c430..6e417d394 100644 --- a/management/server/idp/google_workspace.go +++ b/management/server/idp/google_workspace.go @@ -5,7 +5,6 @@ import ( "encoding/base64" "fmt" "net/http" - "time" log "github.com/sirupsen/logrus" "golang.org/x/oauth2/google" @@ -49,9 +48,10 @@ func NewGoogleWorkspaceManager(ctx context.Context, config GoogleWorkspaceClient httpTransport.MaxIdleConns = 5 httpClient := &http.Client{ - Timeout: 10 * time.Second, + Timeout: idpTimeout(), Transport: httpTransport, } + helper := JsonParser{} if config.CustomerID == "" { diff --git a/management/server/idp/jumpcloud.go b/management/server/idp/jumpcloud.go index 6345e424a..8c4a9d089 100644 --- a/management/server/idp/jumpcloud.go +++ b/management/server/idp/jumpcloud.go @@ -5,7 +5,6 @@ import ( "fmt" "net/http" "strings" - "time" v1 "github.com/TheJumpCloud/jcapi-go/v1" @@ -46,9 +45,10 @@ func NewJumpCloudManager(config JumpCloudClientConfig, appMetrics telemetry.AppM httpTransport.MaxIdleConns = 5 httpClient := &http.Client{ - Timeout: 10 * time.Second, + Timeout: idpTimeout(), Transport: httpTransport, } + helper := JsonParser{} if config.APIToken == "" { diff --git a/management/server/idp/keycloak.go b/management/server/idp/keycloak.go index c611317ab..b640f7520 100644 --- a/management/server/idp/keycloak.go +++ b/management/server/idp/keycloak.go @@ -63,9 +63,10 @@ func NewKeycloakManager(config KeycloakClientConfig, appMetrics telemetry.AppMet httpTransport.MaxIdleConns = 5 httpClient := &http.Client{ - Timeout: 10 * time.Second, + Timeout: idpTimeout(), Transport: httpTransport, } + helper := JsonParser{} if config.ClientID == "" { diff --git a/management/server/idp/okta.go b/management/server/idp/okta.go index b9cd006be..07f0d8008 100644 --- a/management/server/idp/okta.go +++ b/management/server/idp/okta.go @@ -6,7 +6,6 @@ import ( "net/http" "net/url" "strings" - "time" "github.com/okta/okta-sdk-golang/v2/okta" "github.com/okta/okta-sdk-golang/v2/okta/query" @@ -45,7 +44,7 @@ func NewOktaManager(config OktaClientConfig, appMetrics telemetry.AppMetrics) (* httpTransport.MaxIdleConns = 5 httpClient := &http.Client{ - Timeout: 10 * time.Second, + Timeout: idpTimeout(), Transport: httpTransport, } diff --git a/management/server/idp/pocketid.go b/management/server/idp/pocketid.go index d8d764830..ee8e304ee 100644 --- a/management/server/idp/pocketid.go +++ b/management/server/idp/pocketid.go @@ -8,7 +8,6 @@ import ( "net/url" "slices" "strings" - "time" "github.com/netbirdio/netbird/management/server/telemetry" ) @@ -88,9 +87,10 @@ func NewPocketIdManager(config PocketIdClientConfig, appMetrics telemetry.AppMet httpTransport.MaxIdleConns = 5 httpClient := &http.Client{ - Timeout: 10 * time.Second, + Timeout: idpTimeout(), Transport: httpTransport, } + helper := JsonParser{} if config.ManagementEndpoint == "" { diff --git a/management/server/idp/util.go b/management/server/idp/util.go index df1497114..4310d1388 100644 --- a/management/server/idp/util.go +++ b/management/server/idp/util.go @@ -4,7 +4,9 @@ import ( "encoding/json" "math/rand" "net/url" + "os" "strings" + "time" ) var ( @@ -69,3 +71,24 @@ func baseURL(rawURL string) string { return parsedURL.Scheme + "://" + parsedURL.Host } + +const ( + // Provides the env variable name for use with idpTimeout function + idpTimeoutEnv = "NB_IDP_TIMEOUT" + // Sets the defaultTimeout to 10s. + defaultTimeout = 10 * time.Second +) + +// idpTimeout returns a timeout value for the IDP +func idpTimeout() time.Duration { + timeoutStr, ok := os.LookupEnv(idpTimeoutEnv) + if !ok || timeoutStr == "" { + return defaultTimeout + } + + timeout, err := time.ParseDuration(timeoutStr) + if err != nil { + return defaultTimeout + } + return timeout +} diff --git a/management/server/idp/zitadel.go b/management/server/idp/zitadel.go index 8db3c4796..ea0fd0aa7 100644 --- a/management/server/idp/zitadel.go +++ b/management/server/idp/zitadel.go @@ -164,9 +164,10 @@ func NewZitadelManager(config ZitadelClientConfig, appMetrics telemetry.AppMetri httpTransport.MaxIdleConns = 5 httpClient := &http.Client{ - Timeout: 10 * time.Second, + Timeout: idpTimeout(), Transport: httpTransport, } + helper := JsonParser{} hasPAT := config.PAT != "" diff --git a/management/server/instance/manager.go b/management/server/instance/manager.go index 6f50e3ff7..6a0509ebd 100644 --- a/management/server/instance/manager.go +++ b/management/server/instance/manager.go @@ -2,18 +2,54 @@ package instance import ( "context" + "encoding/json" "errors" "fmt" + "io" + "net/http" "net/mail" + "strings" "sync" + "time" + goversion "github.com/hashicorp/go-version" log "github.com/sirupsen/logrus" "github.com/netbirdio/netbird/management/server/idp" "github.com/netbirdio/netbird/management/server/store" "github.com/netbirdio/netbird/shared/management/status" + "github.com/netbirdio/netbird/version" ) +const ( + // Version endpoints + managementVersionURL = "https://pkgs.netbird.io/releases/latest/version" + dashboardReleasesURL = "https://api.github.com/repos/netbirdio/dashboard/releases/latest" + + // Cache TTL for version information + versionCacheTTL = 60 * time.Minute + + // HTTP client timeout + httpTimeout = 5 * time.Second +) + +// VersionInfo contains version information for NetBird components +type VersionInfo struct { + // CurrentVersion is the running management server version + CurrentVersion string + // DashboardVersion is the latest available dashboard version from GitHub + DashboardVersion string + // ManagementVersion is the latest available management version from GitHub + ManagementVersion string + // ManagementUpdateAvailable indicates if a newer management version is available + ManagementUpdateAvailable bool +} + +// githubRelease represents a GitHub release response +type githubRelease struct { + TagName string `json:"tag_name"` +} + // Manager handles instance-level operations like initial setup. type Manager interface { // IsSetupRequired checks if instance setup is required. @@ -23,6 +59,9 @@ type Manager interface { // CreateOwnerUser creates the initial owner user in the embedded IDP. // This should only be called when IsSetupRequired returns true. CreateOwnerUser(ctx context.Context, email, password, name string) (*idp.UserData, error) + + // GetVersionInfo returns version information for NetBird components. + GetVersionInfo(ctx context.Context) (*VersionInfo, error) } // DefaultManager is the default implementation of Manager. @@ -32,6 +71,12 @@ type DefaultManager struct { setupRequired bool setupMu sync.RWMutex + + // Version caching + httpClient *http.Client + versionMu sync.RWMutex + cachedVersions *VersionInfo + lastVersionFetch time.Time } // NewManager creates a new instance manager. @@ -43,6 +88,9 @@ func NewManager(ctx context.Context, store store.Store, idpManager idp.Manager) store: store, embeddedIdpManager: embeddedIdp, setupRequired: false, + httpClient: &http.Client{ + Timeout: httpTimeout, + }, } if embeddedIdp != nil { @@ -134,3 +182,130 @@ func (m *DefaultManager) validateSetupInfo(email, password, name string) error { } return nil } + +// GetVersionInfo returns version information for NetBird components. +func (m *DefaultManager) GetVersionInfo(ctx context.Context) (*VersionInfo, error) { + m.versionMu.RLock() + if m.cachedVersions != nil && time.Since(m.lastVersionFetch) < versionCacheTTL { + cached := *m.cachedVersions + m.versionMu.RUnlock() + return &cached, nil + } + m.versionMu.RUnlock() + + return m.fetchVersionInfo(ctx) +} + +func (m *DefaultManager) fetchVersionInfo(ctx context.Context) (*VersionInfo, error) { + m.versionMu.Lock() + // Double-check after acquiring write lock + if m.cachedVersions != nil && time.Since(m.lastVersionFetch) < versionCacheTTL { + cached := *m.cachedVersions + m.versionMu.Unlock() + return &cached, nil + } + m.versionMu.Unlock() + + info := &VersionInfo{ + CurrentVersion: version.NetbirdVersion(), + } + + // Fetch management version from pkgs.netbird.io (plain text) + mgmtVersion, err := m.fetchPlainTextVersion(ctx, managementVersionURL) + if err != nil { + log.WithContext(ctx).Warnf("failed to fetch management version: %v", err) + } else { + info.ManagementVersion = mgmtVersion + info.ManagementUpdateAvailable = isNewerVersion(info.CurrentVersion, mgmtVersion) + } + + // Fetch dashboard version from GitHub + dashVersion, err := m.fetchGitHubRelease(ctx, dashboardReleasesURL) + if err != nil { + log.WithContext(ctx).Warnf("failed to fetch dashboard version from GitHub: %v", err) + } else { + info.DashboardVersion = dashVersion + } + + // Update cache + m.versionMu.Lock() + m.cachedVersions = info + m.lastVersionFetch = time.Now() + m.versionMu.Unlock() + + return info, nil +} + +// isNewerVersion returns true if latestVersion is greater than currentVersion +func isNewerVersion(currentVersion, latestVersion string) bool { + current, err := goversion.NewVersion(currentVersion) + if err != nil { + return false + } + + latest, err := goversion.NewVersion(latestVersion) + if err != nil { + return false + } + + return latest.GreaterThan(current) +} + +func (m *DefaultManager) fetchPlainTextVersion(ctx context.Context, url string) (string, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return "", fmt.Errorf("create request: %w", err) + } + + req.Header.Set("User-Agent", "NetBird-Management/"+version.NetbirdVersion()) + + resp, err := m.httpClient.Do(req) + if err != nil { + return "", fmt.Errorf("execute request: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("unexpected status code: %d", resp.StatusCode) + } + + body, err := io.ReadAll(io.LimitReader(resp.Body, 100)) + if err != nil { + return "", fmt.Errorf("read response: %w", err) + } + + return strings.TrimSpace(string(body)), nil +} + +func (m *DefaultManager) fetchGitHubRelease(ctx context.Context, url string) (string, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return "", fmt.Errorf("create request: %w", err) + } + + req.Header.Set("Accept", "application/vnd.github.v3+json") + req.Header.Set("User-Agent", "NetBird-Management/"+version.NetbirdVersion()) + + resp, err := m.httpClient.Do(req) + if err != nil { + return "", fmt.Errorf("execute request: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("unexpected status code: %d", resp.StatusCode) + } + + var release githubRelease + if err := json.NewDecoder(resp.Body).Decode(&release); err != nil { + return "", fmt.Errorf("decode response: %w", err) + } + + // Remove 'v' prefix if present + tag := release.TagName + if len(tag) > 0 && tag[0] == 'v' { + tag = tag[1:] + } + + return tag, nil +} diff --git a/management/server/instance/version_test.go b/management/server/instance/version_test.go new file mode 100644 index 000000000..35ba66db8 --- /dev/null +++ b/management/server/instance/version_test.go @@ -0,0 +1,285 @@ +package instance + +import ( + "bytes" + "context" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "strings" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// mockRoundTripper implements http.RoundTripper for testing +type mockRoundTripper struct { + callCount atomic.Int32 + managementVersion string + dashboardVersion string +} + +func (m *mockRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + m.callCount.Add(1) + + var body string + if strings.Contains(req.URL.String(), "pkgs.netbird.io") { + // Plain text response for management version + body = m.managementVersion + } else if strings.Contains(req.URL.String(), "github.com") { + // JSON response for dashboard version + jsonResp, _ := json.Marshal(githubRelease{TagName: "v" + m.dashboardVersion}) + body = string(jsonResp) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: io.NopCloser(bytes.NewBufferString(body)), + Header: make(http.Header), + }, nil +} + +func TestDefaultManager_GetVersionInfo_ReturnsCurrentVersion(t *testing.T) { + mockTransport := &mockRoundTripper{ + managementVersion: "0.65.0", + dashboardVersion: "2.10.0", + } + + m := &DefaultManager{ + httpClient: &http.Client{Transport: mockTransport}, + } + + ctx := context.Background() + + info, err := m.GetVersionInfo(ctx) + require.NoError(t, err) + + // CurrentVersion should always be set + assert.NotEmpty(t, info.CurrentVersion) + assert.Equal(t, "0.65.0", info.ManagementVersion) + assert.Equal(t, "2.10.0", info.DashboardVersion) + assert.Equal(t, int32(2), mockTransport.callCount.Load()) // 2 calls: management + dashboard +} + +func TestDefaultManager_GetVersionInfo_CachesResults(t *testing.T) { + mockTransport := &mockRoundTripper{ + managementVersion: "0.65.0", + dashboardVersion: "2.10.0", + } + + m := &DefaultManager{ + httpClient: &http.Client{Transport: mockTransport}, + } + + ctx := context.Background() + + // First call + info1, err := m.GetVersionInfo(ctx) + require.NoError(t, err) + assert.NotEmpty(t, info1.CurrentVersion) + assert.Equal(t, "0.65.0", info1.ManagementVersion) + + initialCallCount := mockTransport.callCount.Load() + + // Second call should use cache (no additional HTTP calls) + info2, err := m.GetVersionInfo(ctx) + require.NoError(t, err) + assert.Equal(t, info1.CurrentVersion, info2.CurrentVersion) + assert.Equal(t, info1.ManagementVersion, info2.ManagementVersion) + assert.Equal(t, info1.DashboardVersion, info2.DashboardVersion) + + // Verify no additional HTTP calls were made (cache was used) + assert.Equal(t, initialCallCount, mockTransport.callCount.Load()) +} + +func TestDefaultManager_FetchGitHubRelease_ParsesTagName(t *testing.T) { + tests := []struct { + name string + tagName string + expected string + shouldError bool + }{ + { + name: "tag with v prefix", + tagName: "v1.2.3", + expected: "1.2.3", + }, + { + name: "tag without v prefix", + tagName: "1.2.3", + expected: "1.2.3", + }, + { + name: "tag with prerelease", + tagName: "v2.0.0-beta.1", + expected: "2.0.0-beta.1", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(githubRelease{TagName: tc.tagName}) + })) + defer server.Close() + + m := &DefaultManager{ + httpClient: &http.Client{Timeout: 5 * time.Second}, + } + + version, err := m.fetchGitHubRelease(context.Background(), server.URL) + + if tc.shouldError { + assert.Error(t, err) + } else { + require.NoError(t, err) + assert.Equal(t, tc.expected, version) + } + }) + } +} + +func TestDefaultManager_FetchGitHubRelease_HandlesErrors(t *testing.T) { + tests := []struct { + name string + statusCode int + body string + }{ + { + name: "not found", + statusCode: http.StatusNotFound, + body: `{"message": "Not Found"}`, + }, + { + name: "rate limited", + statusCode: http.StatusForbidden, + body: `{"message": "API rate limit exceeded"}`, + }, + { + name: "server error", + statusCode: http.StatusInternalServerError, + body: `{"message": "Internal Server Error"}`, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(tc.statusCode) + _, _ = w.Write([]byte(tc.body)) + })) + defer server.Close() + + m := &DefaultManager{ + httpClient: &http.Client{Timeout: 5 * time.Second}, + } + + _, err := m.fetchGitHubRelease(context.Background(), server.URL) + assert.Error(t, err) + }) + } +} + +func TestDefaultManager_FetchGitHubRelease_InvalidJSON(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write([]byte(`{invalid json}`)) + })) + defer server.Close() + + m := &DefaultManager{ + httpClient: &http.Client{Timeout: 5 * time.Second}, + } + + _, err := m.fetchGitHubRelease(context.Background(), server.URL) + assert.Error(t, err) +} + +func TestDefaultManager_FetchGitHubRelease_ContextCancellation(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + time.Sleep(1 * time.Second) + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(githubRelease{TagName: "v1.0.0"}) + })) + defer server.Close() + + m := &DefaultManager{ + httpClient: &http.Client{Timeout: 5 * time.Second}, + } + + ctx, cancel := context.WithCancel(context.Background()) + cancel() // Cancel immediately + + _, err := m.fetchGitHubRelease(ctx, server.URL) + assert.Error(t, err) +} + +func TestIsNewerVersion(t *testing.T) { + tests := []struct { + name string + currentVersion string + latestVersion string + expected bool + }{ + { + name: "latest is newer - minor version", + currentVersion: "0.64.1", + latestVersion: "0.65.0", + expected: true, + }, + { + name: "latest is newer - patch version", + currentVersion: "0.64.1", + latestVersion: "0.64.2", + expected: true, + }, + { + name: "latest is newer - major version", + currentVersion: "0.64.1", + latestVersion: "1.0.0", + expected: true, + }, + { + name: "versions are equal", + currentVersion: "0.64.1", + latestVersion: "0.64.1", + expected: false, + }, + { + name: "current is newer - minor version", + currentVersion: "0.65.0", + latestVersion: "0.64.1", + expected: false, + }, + { + name: "current is newer - patch version", + currentVersion: "0.64.2", + latestVersion: "0.64.1", + expected: false, + }, + { + name: "development version", + currentVersion: "development", + latestVersion: "0.65.0", + expected: false, + }, + { + name: "invalid latest version", + currentVersion: "0.64.1", + latestVersion: "invalid", + expected: false, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + result := isNewerVersion(tc.currentVersion, tc.latestVersion) + assert.Equal(t, tc.expected, result) + }) + } +} diff --git a/management/server/job/channel.go b/management/server/job/channel.go new file mode 100644 index 000000000..c4dc98a68 --- /dev/null +++ b/management/server/job/channel.go @@ -0,0 +1,59 @@ +package job + +import ( + "context" + "errors" + "fmt" + "sync" + "time" +) + +// todo consider the channel buffer size when we allow to run multiple jobs +const jobChannelBuffer = 1 + +var ( + ErrJobChannelClosed = errors.New("job channel closed") +) + +type Channel struct { + events chan *Event + once sync.Once +} + +func NewChannel() *Channel { + jc := &Channel{ + events: make(chan *Event, jobChannelBuffer), + } + + return jc +} + +func (jc *Channel) AddEvent(ctx context.Context, responseWait time.Duration, event *Event) error { + select { + case <-ctx.Done(): + return ctx.Err() + // todo: timeout is handled in the wrong place. If the peer does not respond with the job response, the server does not clean it up from the pending jobs and cannot apply a new job + case <-time.After(responseWait): + return fmt.Errorf("failed to add the event to the channel") + case jc.events <- event: + } + return nil +} + +func (jc *Channel) Close() { + jc.once.Do(func() { + close(jc.events) + }) +} + +func (jc *Channel) Event(ctx context.Context) (*Event, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case job, open := <-jc.events: + if !open { + return nil, ErrJobChannelClosed + } + return job, nil + } +} diff --git a/management/server/job/manager.go b/management/server/job/manager.go new file mode 100644 index 000000000..0b183ac39 --- /dev/null +++ b/management/server/job/manager.go @@ -0,0 +1,182 @@ +package job + +import ( + "context" + "fmt" + "sync" + "time" + + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/management/internals/modules/peers" + "github.com/netbirdio/netbird/management/server/store" + "github.com/netbirdio/netbird/management/server/telemetry" + "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/shared/management/proto" +) + +type Event struct { + PeerID string + Request *proto.JobRequest + Response *proto.JobResponse +} + +type Manager struct { + mu *sync.RWMutex + jobChannels map[string]*Channel // per-peer job streams + pending map[string]*Event // jobID → event + responseWait time.Duration + metrics telemetry.AppMetrics + Store store.Store + peersManager peers.Manager +} + +func NewJobManager(metrics telemetry.AppMetrics, store store.Store, peersManager peers.Manager) *Manager { + + return &Manager{ + jobChannels: make(map[string]*Channel), + pending: make(map[string]*Event), + responseWait: 5 * time.Minute, + metrics: metrics, + mu: &sync.RWMutex{}, + Store: store, + peersManager: peersManager, + } +} + +// CreateJobChannel creates or replaces a channel for a peer +func (jm *Manager) CreateJobChannel(ctx context.Context, accountID, peerID string) *Channel { + // all pending jobs stored in db for this peer should be failed + if err := jm.Store.MarkAllPendingJobsAsFailed(ctx, accountID, peerID, "Pending job cleanup: marked as failed automatically due to being stuck too long"); err != nil { + log.WithContext(ctx).Error(err.Error()) + } + + jm.mu.Lock() + defer jm.mu.Unlock() + + if ch, ok := jm.jobChannels[peerID]; ok { + ch.Close() + delete(jm.jobChannels, peerID) + } + + ch := NewChannel() + jm.jobChannels[peerID] = ch + return ch +} + +// SendJob sends a job to a peer and tracks it as pending +func (jm *Manager) SendJob(ctx context.Context, accountID, peerID string, req *proto.JobRequest) error { + jm.mu.RLock() + ch, ok := jm.jobChannels[peerID] + jm.mu.RUnlock() + if !ok { + return fmt.Errorf("peer %s has no channel", peerID) + } + + event := &Event{ + PeerID: peerID, + Request: req, + } + + jm.mu.Lock() + jm.pending[string(req.ID)] = event + jm.mu.Unlock() + + if err := ch.AddEvent(ctx, jm.responseWait, event); err != nil { + jm.cleanup(ctx, accountID, string(req.ID), err.Error()) + return err + } + + return nil +} + +// HandleResponse marks a job as finished and moves it to completed +func (jm *Manager) HandleResponse(ctx context.Context, resp *proto.JobResponse, peerKey string) error { + jm.mu.Lock() + defer jm.mu.Unlock() + + // todo: validate job ID and would be nice to use uuid text marshal instead of string + jobID := string(resp.ID) + + // todo: in this map has jobs for all peers in any account. Consider to validate the jobID association for the peer + event, ok := jm.pending[jobID] + if !ok { + return fmt.Errorf("job %s not found", jobID) + } + var job types.Job + // todo: ApplyResponse should be static. Any member value is unusable in this way + if err := job.ApplyResponse(resp); err != nil { + return fmt.Errorf("invalid job response: %v", err) + } + + peerID, err := jm.peersManager.GetPeerID(ctx, peerKey) + if err != nil { + return fmt.Errorf("failed to get peer ID: %v", err) + } + if peerID != event.PeerID { + return fmt.Errorf("peer ID mismatch: %s != %s", peerID, event.PeerID) + } + + // update or create the store for job response + err = jm.Store.CompletePeerJob(ctx, &job) + if err != nil { + return fmt.Errorf("failed to complete job %s: %v", jobID, err) + } + + delete(jm.pending, jobID) + return nil +} + +// CloseChannel closes a peer’s channel and cleans up its jobs +func (jm *Manager) CloseChannel(ctx context.Context, accountID, peerID string) { + jm.mu.Lock() + defer jm.mu.Unlock() + + if ch, ok := jm.jobChannels[peerID]; ok { + ch.Close() + delete(jm.jobChannels, peerID) + } + + for jobID, ev := range jm.pending { + if ev.PeerID == peerID { + // if the client disconnect and there is pending job then mark it as failed + if err := jm.Store.MarkPendingJobsAsFailed(ctx, accountID, peerID, jobID, "Time out peer disconnected"); err != nil { + log.WithContext(ctx).Errorf("failed to mark pending jobs as failed: %v", err) + } + delete(jm.pending, jobID) + } + } +} + +// cleanup removes a pending job safely +func (jm *Manager) cleanup(ctx context.Context, accountID, jobID string, reason string) { + jm.mu.Lock() + defer jm.mu.Unlock() + + if ev, ok := jm.pending[jobID]; ok { + if err := jm.Store.MarkPendingJobsAsFailed(ctx, accountID, ev.PeerID, jobID, reason); err != nil { + log.WithContext(ctx).Errorf("failed to mark pending jobs as failed: %v", err) + } + delete(jm.pending, jobID) + } +} + +func (jm *Manager) IsPeerConnected(peerID string) bool { + jm.mu.RLock() + defer jm.mu.RUnlock() + + _, ok := jm.jobChannels[peerID] + return ok +} + +func (jm *Manager) IsPeerHasPendingJobs(peerID string) bool { + jm.mu.RLock() + defer jm.mu.RUnlock() + + for _, ev := range jm.pending { + if ev.PeerID == peerID { + return true + } + } + return false +} diff --git a/management/server/management_proto_test.go b/management/server/management_proto_test.go index cc302400f..090c99877 100644 --- a/management/server/management_proto_test.go +++ b/management/server/management_proto_test.go @@ -31,6 +31,7 @@ import ( "github.com/netbirdio/netbird/management/server/activity" "github.com/netbirdio/netbird/management/server/groups" "github.com/netbirdio/netbird/management/server/integrations/port_forwarding" + "github.com/netbirdio/netbird/management/server/job" nbpeer "github.com/netbirdio/netbird/management/server/peer" "github.com/netbirdio/netbird/management/server/permissions" "github.com/netbirdio/netbird/management/server/settings" @@ -361,13 +362,15 @@ func startManagementForTest(t *testing.T, testFile string, config *config.Config AnyTimes() permissionsManager := permissions.NewManager(store) groupsManager := groups.NewManagerMock() + peersManager := peers.NewManager(store, permissionsManager) + jobManager := job.NewJobManager(nil, store, peersManager) updateManager := update_channel.NewPeersUpdateManager(metrics) requestBuffer := NewAccountRequestBuffer(ctx, store) ephemeralMgr := manager.NewEphemeralManager(store, peers.NewManager(store, permissionsManager)) networkMapController := controller.NewController(ctx, store, metrics, updateManager, requestBuffer, MockIntegratedValidator{}, settingsMockManager, "netbird.selfhosted", port_forwarding.NewControllerMock(), ephemeralMgr, config) - accountManager, err := BuildManager(ctx, nil, store, networkMapController, nil, "", + accountManager, err := BuildManager(ctx, nil, store, networkMapController, jobManager, nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) if err != nil { @@ -381,7 +384,7 @@ func startManagementForTest(t *testing.T, testFile string, config *config.Config return nil, nil, "", cleanup, err } - mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, secretsManager, nil, nil, MockIntegratedValidator{}, networkMapController, nil) + mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, MockIntegratedValidator{}, networkMapController, nil) if err != nil { return nil, nil, "", cleanup, err } diff --git a/management/server/management_test.go b/management/server/management_test.go index ace372509..0864baadf 100644 --- a/management/server/management_test.go +++ b/management/server/management_test.go @@ -30,6 +30,7 @@ import ( "github.com/netbirdio/netbird/management/server/activity" "github.com/netbirdio/netbird/management/server/groups" "github.com/netbirdio/netbird/management/server/integrations/port_forwarding" + "github.com/netbirdio/netbird/management/server/job" "github.com/netbirdio/netbird/management/server/permissions" "github.com/netbirdio/netbird/management/server/settings" "github.com/netbirdio/netbird/management/server/store" @@ -202,6 +203,8 @@ func startServer( AnyTimes() permissionsManager := permissions.NewManager(str) + peersManager := peers.NewManager(str, permissionsManager) + jobManager := job.NewJobManager(nil, str, peersManager) ctx := context.Background() updateManager := update_channel.NewPeersUpdateManager(metrics) @@ -213,6 +216,7 @@ func startServer( nil, str, networkMapController, + jobManager, nil, "", eventStore, @@ -237,6 +241,7 @@ func startServer( config, accountManager, settingsMockManager, + jobManager, secretsManager, nil, nil, diff --git a/management/server/mock_server/account_mock.go b/management/server/mock_server/account_mock.go index 422829eba..026989898 100644 --- a/management/server/mock_server/account_mock.go +++ b/management/server/mock_server/account_mock.go @@ -74,6 +74,7 @@ type MockAccountManager struct { SaveOrAddUsersFunc func(ctx context.Context, accountID, initiatorUserID string, update []*types.User, addIfNotExists bool) ([]*types.UserInfo, error) DeleteUserFunc func(ctx context.Context, accountID string, initiatorUserID string, targetUserID string) error DeleteRegularUsersFunc func(ctx context.Context, accountID, initiatorUserID string, targetUserIDs []string, userInfos map[string]*types.UserInfo) error + UpdateUserPasswordFunc func(ctx context.Context, accountID, currentUserID, targetUserID string, oldPassword, newPassword string) error CreatePATFunc func(ctx context.Context, accountID string, initiatorUserID string, targetUserId string, tokenName string, expiresIn int) (*types.PersonalAccessTokenGenerated, error) DeletePATFunc func(ctx context.Context, accountID string, initiatorUserID string, targetUserId string, tokenID string) error GetPATFunc func(ctx context.Context, accountID string, initiatorUserID string, targetUserId string, tokenID string) (*types.PersonalAccessToken, error) @@ -135,6 +136,35 @@ type MockAccountManager struct { CreateIdentityProviderFunc func(ctx context.Context, accountID, userID string, idp *types.IdentityProvider) (*types.IdentityProvider, error) UpdateIdentityProviderFunc func(ctx context.Context, accountID, idpID, userID string, idp *types.IdentityProvider) (*types.IdentityProvider, error) DeleteIdentityProviderFunc func(ctx context.Context, accountID, idpID, userID string) error + CreatePeerJobFunc func(ctx context.Context, accountID, peerID, userID string, job *types.Job) error + GetAllPeerJobsFunc func(ctx context.Context, accountID, userID, peerID string) ([]*types.Job, error) + GetPeerJobByIDFunc func(ctx context.Context, accountID, userID, peerID, jobID string) (*types.Job, error) + CreateUserInviteFunc func(ctx context.Context, accountID, initiatorUserID string, invite *types.UserInfo, expiresIn int) (*types.UserInvite, error) + AcceptUserInviteFunc func(ctx context.Context, token, password string) error + RegenerateUserInviteFunc func(ctx context.Context, accountID, initiatorUserID, inviteID string, expiresIn int) (*types.UserInvite, error) + GetUserInviteInfoFunc func(ctx context.Context, token string) (*types.UserInviteInfo, error) + ListUserInvitesFunc func(ctx context.Context, accountID, initiatorUserID string) ([]*types.UserInvite, error) + DeleteUserInviteFunc func(ctx context.Context, accountID, initiatorUserID, inviteID string) error +} + +func (am *MockAccountManager) CreatePeerJob(ctx context.Context, accountID, peerID, userID string, job *types.Job) error { + if am.CreatePeerJobFunc != nil { + return am.CreatePeerJobFunc(ctx, accountID, peerID, userID, job) + } + return status.Errorf(codes.Unimplemented, "method CreatePeerJob is not implemented") +} + +func (am *MockAccountManager) GetAllPeerJobs(ctx context.Context, accountID, userID, peerID string) ([]*types.Job, error) { + if am.GetAllPeerJobsFunc != nil { + return am.GetAllPeerJobsFunc(ctx, accountID, userID, peerID) + } + return nil, status.Errorf(codes.Unimplemented, "method GetAllPeerJobs is not implemented") +} +func (am *MockAccountManager) GetPeerJobByID(ctx context.Context, accountID, userID, peerID, jobID string) (*types.Job, error) { + if am.GetPeerJobByIDFunc != nil { + return am.GetPeerJobByIDFunc(ctx, accountID, userID, peerID, jobID) + } + return nil, status.Errorf(codes.Unimplemented, "method GetPeerJobByID is not implemented") } func (am *MockAccountManager) CreateGroup(ctx context.Context, accountID, userID string, group *types.Group) error { @@ -612,6 +642,14 @@ func (am *MockAccountManager) DeleteRegularUsers(ctx context.Context, accountID, return status.Errorf(codes.Unimplemented, "method DeleteRegularUsers is not implemented") } +// UpdateUserPassword mocks UpdateUserPassword of the AccountManager interface +func (am *MockAccountManager) UpdateUserPassword(ctx context.Context, accountID, currentUserID, targetUserID string, oldPassword, newPassword string) error { + if am.UpdateUserPasswordFunc != nil { + return am.UpdateUserPasswordFunc(ctx, accountID, currentUserID, targetUserID, oldPassword, newPassword) + } + return status.Errorf(codes.Unimplemented, "method UpdateUserPassword is not implemented") +} + func (am *MockAccountManager) InviteUser(ctx context.Context, accountID string, initiatorUserID string, targetUserID string) error { if am.InviteUserFunc != nil { return am.InviteUserFunc(ctx, accountID, initiatorUserID, targetUserID) @@ -681,6 +719,48 @@ func (am *MockAccountManager) CreateUser(ctx context.Context, accountID, userID return nil, status.Errorf(codes.Unimplemented, "method CreateUser is not implemented") } +func (am *MockAccountManager) CreateUserInvite(ctx context.Context, accountID, initiatorUserID string, invite *types.UserInfo, expiresIn int) (*types.UserInvite, error) { + if am.CreateUserInviteFunc != nil { + return am.CreateUserInviteFunc(ctx, accountID, initiatorUserID, invite, expiresIn) + } + return nil, status.Errorf(codes.Unimplemented, "method CreateUserInvite is not implemented") +} + +func (am *MockAccountManager) AcceptUserInvite(ctx context.Context, token, password string) error { + if am.AcceptUserInviteFunc != nil { + return am.AcceptUserInviteFunc(ctx, token, password) + } + return status.Errorf(codes.Unimplemented, "method AcceptUserInvite is not implemented") +} + +func (am *MockAccountManager) RegenerateUserInvite(ctx context.Context, accountID, initiatorUserID, inviteID string, expiresIn int) (*types.UserInvite, error) { + if am.RegenerateUserInviteFunc != nil { + return am.RegenerateUserInviteFunc(ctx, accountID, initiatorUserID, inviteID, expiresIn) + } + return nil, status.Errorf(codes.Unimplemented, "method RegenerateUserInvite is not implemented") +} + +func (am *MockAccountManager) GetUserInviteInfo(ctx context.Context, token string) (*types.UserInviteInfo, error) { + if am.GetUserInviteInfoFunc != nil { + return am.GetUserInviteInfoFunc(ctx, token) + } + return nil, status.Errorf(codes.Unimplemented, "method GetUserInviteInfo is not implemented") +} + +func (am *MockAccountManager) ListUserInvites(ctx context.Context, accountID, initiatorUserID string) ([]*types.UserInvite, error) { + if am.ListUserInvitesFunc != nil { + return am.ListUserInvitesFunc(ctx, accountID, initiatorUserID) + } + return nil, status.Errorf(codes.Unimplemented, "method ListUserInvites is not implemented") +} + +func (am *MockAccountManager) DeleteUserInvite(ctx context.Context, accountID, initiatorUserID, inviteID string) error { + if am.DeleteUserInviteFunc != nil { + return am.DeleteUserInviteFunc(ctx, accountID, initiatorUserID, inviteID) + } + return status.Errorf(codes.Unimplemented, "method DeleteUserInvite is not implemented") +} + func (am *MockAccountManager) GetAccountIDFromUserAuth(ctx context.Context, userAuth auth.UserAuth) (string, string, error) { if am.GetAccountIDFromUserAuthFunc != nil { return am.GetAccountIDFromUserAuthFunc(ctx, userAuth) diff --git a/management/server/nameserver_test.go b/management/server/nameserver_test.go index 955c6b0ef..0d781e0d4 100644 --- a/management/server/nameserver_test.go +++ b/management/server/nameserver_test.go @@ -18,6 +18,7 @@ import ( "github.com/netbirdio/netbird/management/internals/server/config" "github.com/netbirdio/netbird/management/server/activity" "github.com/netbirdio/netbird/management/server/integrations/port_forwarding" + "github.com/netbirdio/netbird/management/server/job" nbpeer "github.com/netbirdio/netbird/management/server/peer" "github.com/netbirdio/netbird/management/server/permissions" "github.com/netbirdio/netbird/management/server/settings" @@ -790,13 +791,14 @@ func createNSManager(t *testing.T) (*DefaultAccountManager, error) { AnyTimes() permissionsManager := permissions.NewManager(store) + peersManager := peers.NewManager(store, permissionsManager) ctx := context.Background() updateManager := update_channel.NewPeersUpdateManager(metrics) requestBuffer := NewAccountRequestBuffer(ctx, store) networkMapController := controller.NewController(ctx, store, metrics, updateManager, requestBuffer, MockIntegratedValidator{}, settingsMockManager, "netbird.selfhosted", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(store, peers.NewManager(store, permissionsManager)), &config.Config{}) - return BuildManager(context.Background(), nil, store, networkMapController, nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) + return BuildManager(context.Background(), nil, store, networkMapController, job.NewJobManager(nil, store, peersManager), nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) } func createNSStore(t *testing.T) (store.Store, error) { diff --git a/management/server/peer.go b/management/server/peer.go index 977bd52af..80c74e209 100644 --- a/management/server/peer.go +++ b/management/server/peer.go @@ -31,6 +31,8 @@ import ( "github.com/netbirdio/netbird/shared/management/status" ) +const remoteJobsMinVer = "0.64.0" + // GetPeers returns a list of peers under the given account filtering out peers that do not belong to a user if // the current user is not an admin. func (am *DefaultAccountManager) GetPeers(ctx context.Context, accountID, userID, nameFilter, ipFilter string) ([]*nbpeer.Peer, error) { @@ -324,6 +326,134 @@ func (am *DefaultAccountManager) UpdatePeer(ctx context.Context, accountID, user return peer, nil } +func (am *DefaultAccountManager) CreatePeerJob(ctx context.Context, accountID, peerID, userID string, job *types.Job) error { + allowed, err := am.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.RemoteJobs, operations.Create) + if err != nil { + return status.NewPermissionValidationError(err) + } + if !allowed { + return status.NewPermissionDeniedError() + } + + p, err := am.Store.GetPeerByID(ctx, store.LockingStrengthNone, accountID, peerID) + if err != nil { + return err + } + + if p.AccountID != accountID { + return status.NewPeerNotPartOfAccountError() + } + + meetMinVer, err := posture.MeetsMinVersion(remoteJobsMinVer, p.Meta.WtVersion) + if !strings.Contains(p.Meta.WtVersion, "dev") && (!meetMinVer || err != nil) { + return status.Errorf(status.PreconditionFailed, "peer version %s does not meet the minimum required version %s for remote jobs", p.Meta.WtVersion, remoteJobsMinVer) + } + + if !am.jobManager.IsPeerConnected(peerID) { + return status.Errorf(status.BadRequest, "peer not connected") + } + + // check if already has pending jobs + // todo: The job checks here are not protected. The user can run this function from multiple threads, + // and each thread can think there is no job yet. This means entries in the pending job map will be overwritten, + // and only one will be kept, but potentially another one will overwrite it in the queue. + if am.jobManager.IsPeerHasPendingJobs(peerID) { + return status.Errorf(status.BadRequest, "peer already has pending job") + } + + jobStream, err := job.ToStreamJobRequest() + if err != nil { + return status.Errorf(status.BadRequest, "invalid job request %v", err) + } + + // try sending job first + if err := am.jobManager.SendJob(ctx, accountID, peerID, jobStream); err != nil { + return status.Errorf(status.Internal, "failed to send job: %v", err) + } + + var peer *nbpeer.Peer + var eventsToStore func() + + // persist job in DB only if send succeeded + err = am.Store.ExecuteInTransaction(ctx, func(transaction store.Store) error { + peer, err = transaction.GetPeerByID(ctx, store.LockingStrengthUpdate, accountID, peerID) + if err != nil { + return err + } + if err := transaction.CreatePeerJob(ctx, job); err != nil { + return err + } + + jobMeta := map[string]any{ + "for_peer_name": peer.Name, + "job_type": job.Workload.Type, + } + + eventsToStore = func() { + am.StoreEvent(ctx, userID, peer.ID, accountID, activity.JobCreatedByUser, jobMeta) + } + return nil + }) + if err != nil { + return err + } + eventsToStore() + return nil +} + +func (am *DefaultAccountManager) GetAllPeerJobs(ctx context.Context, accountID, userID, peerID string) ([]*types.Job, error) { + // todo: Create permissions for job + allowed, err := am.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.RemoteJobs, operations.Read) + if err != nil { + return nil, status.NewPermissionValidationError(err) + } + if !allowed { + return nil, status.NewPermissionDeniedError() + } + + peerAccountID, err := am.Store.GetAccountIDByPeerID(ctx, store.LockingStrengthNone, peerID) + if err != nil { + return nil, err + } + + if peerAccountID != accountID { + return nil, status.NewPeerNotPartOfAccountError() + } + + accountJobs, err := am.Store.GetPeerJobs(ctx, accountID, peerID) + if err != nil { + return nil, err + } + + return accountJobs, nil +} + +func (am *DefaultAccountManager) GetPeerJobByID(ctx context.Context, accountID, userID, peerID, jobID string) (*types.Job, error) { + allowed, err := am.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.RemoteJobs, operations.Read) + if err != nil { + return nil, status.NewPermissionValidationError(err) + } + if !allowed { + return nil, status.NewPermissionDeniedError() + } + + peerAccountID, err := am.Store.GetAccountIDByPeerID(ctx, store.LockingStrengthNone, peerID) + if err != nil { + return nil, err + } + + if peerAccountID != accountID { + return nil, status.NewPeerNotPartOfAccountError() + } + + job, err := am.Store.GetPeerJobByID(ctx, accountID, jobID) + if err != nil { + return nil, err + } + + return job, nil +} + // DeletePeer removes peer from the account by its IP func (am *DefaultAccountManager) DeletePeer(ctx context.Context, accountID, peerID, userID string) error { allowed, err := am.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Peers, operations.Delete) @@ -598,6 +728,11 @@ func (am *DefaultAccountManager) AddPeer(ctx context.Context, accountID, setupKe return fmt.Errorf("failed adding peer to All group: %w", err) } + if temporary { + // we should track ephemeral peers to be able to clean them if the peer don't sync and be marked as connected + am.networkMapController.TrackEphemeralPeer(ctx, newPeer) + } + if addedByUser { err := transaction.SaveUserLastLogin(ctx, accountID, userID, newPeer.GetLastLogin()) if err != nil { diff --git a/management/server/peer_test.go b/management/server/peer_test.go index 0160ff586..3846a3e85 100644 --- a/management/server/peer_test.go +++ b/management/server/peer_test.go @@ -34,6 +34,7 @@ import ( "github.com/netbirdio/netbird/management/internals/shared/grpc" "github.com/netbirdio/netbird/management/server/http/testing/testing_tools" "github.com/netbirdio/netbird/management/server/integrations/port_forwarding" + "github.com/netbirdio/netbird/management/server/job" "github.com/netbirdio/netbird/management/server/permissions" "github.com/netbirdio/netbird/management/server/settings" "github.com/netbirdio/netbird/shared/management/status" @@ -1289,13 +1290,14 @@ func Test_RegisterPeerByUser(t *testing.T) { t.Cleanup(ctrl.Finish) settingsMockManager := settings.NewMockManager(ctrl) permissionsManager := permissions.NewManager(s) + peersManager := peers.NewManager(s, permissionsManager) ctx := context.Background() updateManager := update_channel.NewPeersUpdateManager(metrics) requestBuffer := NewAccountRequestBuffer(ctx, s) networkMapController := controller.NewController(ctx, s, metrics, updateManager, requestBuffer, MockIntegratedValidator{}, settingsMockManager, "netbird.cloud", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(s, peers.NewManager(s, permissionsManager)), &config.Config{}) - am, err := BuildManager(context.Background(), nil, s, networkMapController, nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) + am, err := BuildManager(context.Background(), nil, s, networkMapController, job.NewJobManager(nil, s, peersManager), nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) assert.NoError(t, err) existingAccountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b" @@ -1374,13 +1376,14 @@ func Test_RegisterPeerBySetupKey(t *testing.T) { Return(&types.ExtraSettings{}, nil). AnyTimes() permissionsManager := permissions.NewManager(s) + peersManager := peers.NewManager(s, permissionsManager) ctx := context.Background() updateManager := update_channel.NewPeersUpdateManager(metrics) requestBuffer := NewAccountRequestBuffer(ctx, s) networkMapController := controller.NewController(ctx, s, metrics, updateManager, requestBuffer, MockIntegratedValidator{}, settingsMockManager, "netbird.cloud", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(s, peers.NewManager(s, permissionsManager)), &config.Config{}) - am, err := BuildManager(context.Background(), nil, s, networkMapController, nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) + am, err := BuildManager(context.Background(), nil, s, networkMapController, job.NewJobManager(nil, s, peersManager), nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) assert.NoError(t, err) existingAccountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b" @@ -1527,13 +1530,14 @@ func Test_RegisterPeerRollbackOnFailure(t *testing.T) { settingsMockManager := settings.NewMockManager(ctrl) permissionsManager := permissions.NewManager(s) + peersManager := peers.NewManager(s, permissionsManager) ctx := context.Background() updateManager := update_channel.NewPeersUpdateManager(metrics) requestBuffer := NewAccountRequestBuffer(ctx, s) networkMapController := controller.NewController(ctx, s, metrics, updateManager, requestBuffer, MockIntegratedValidator{}, settingsMockManager, "netbird.cloud", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(s, peers.NewManager(s, permissionsManager)), &config.Config{}) - am, err := BuildManager(context.Background(), nil, s, networkMapController, nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) + am, err := BuildManager(context.Background(), nil, s, networkMapController, job.NewJobManager(nil, s, peersManager), nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) assert.NoError(t, err) existingAccountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b" @@ -1607,13 +1611,14 @@ func Test_LoginPeer(t *testing.T) { Return(&types.ExtraSettings{}, nil). AnyTimes() permissionsManager := permissions.NewManager(s) + peersManager := peers.NewManager(s, permissionsManager) ctx := context.Background() updateManager := update_channel.NewPeersUpdateManager(metrics) requestBuffer := NewAccountRequestBuffer(ctx, s) networkMapController := controller.NewController(ctx, s, metrics, updateManager, requestBuffer, MockIntegratedValidator{}, settingsMockManager, "netbird.cloud", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(s, peers.NewManager(s, permissionsManager)), &config.Config{}) - am, err := BuildManager(context.Background(), nil, s, networkMapController, nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) + am, err := BuildManager(context.Background(), nil, s, networkMapController, job.NewJobManager(nil, s, peersManager), nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) assert.NoError(t, err) existingAccountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b" diff --git a/management/server/permissions/modules/module.go b/management/server/permissions/modules/module.go index 67e491087..1e75330eb 100644 --- a/management/server/permissions/modules/module.go +++ b/management/server/permissions/modules/module.go @@ -3,36 +3,38 @@ package modules type Module string const ( - Networks Module = "networks" - Peers Module = "peers" - Groups Module = "groups" - Settings Module = "settings" - Accounts Module = "accounts" - Dns Module = "dns" - Nameservers Module = "nameservers" - Events Module = "events" - Policies Module = "policies" - Routes Module = "routes" - Users Module = "users" - SetupKeys Module = "setup_keys" - Pats Module = "pats" + Networks Module = "networks" + Peers Module = "peers" + RemoteJobs Module = "remote_jobs" + Groups Module = "groups" + Settings Module = "settings" + Accounts Module = "accounts" + Dns Module = "dns" + Nameservers Module = "nameservers" + Events Module = "events" + Policies Module = "policies" + Routes Module = "routes" + Users Module = "users" + SetupKeys Module = "setup_keys" + Pats Module = "pats" IdentityProviders Module = "identity_providers" Services Module = "services" ) var All = map[Module]struct{}{ - Networks: {}, - Peers: {}, - Groups: {}, - Settings: {}, - Accounts: {}, - Dns: {}, - Nameservers: {}, - Events: {}, - Policies: {}, - Routes: {}, - Users: {}, - SetupKeys: {}, - Pats: {}, + Networks: {}, + Peers: {}, + RemoteJobs: {}, + Groups: {}, + Settings: {}, + Accounts: {}, + Dns: {}, + Nameservers: {}, + Events: {}, + Policies: {}, + Routes: {}, + Users: {}, + SetupKeys: {}, + Pats: {}, IdentityProviders: {}, } diff --git a/management/server/route_test.go b/management/server/route_test.go index 6dc8c4cf4..d4882eff8 100644 --- a/management/server/route_test.go +++ b/management/server/route_test.go @@ -21,6 +21,7 @@ import ( "github.com/netbirdio/netbird/management/internals/server/config" "github.com/netbirdio/netbird/management/server/activity" "github.com/netbirdio/netbird/management/server/integrations/port_forwarding" + "github.com/netbirdio/netbird/management/server/job" resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" routerTypes "github.com/netbirdio/netbird/management/server/networks/routers/types" networkTypes "github.com/netbirdio/netbird/management/server/networks/types" @@ -1289,13 +1290,14 @@ func createRouterManager(t *testing.T) (*DefaultAccountManager, *update_channel. Return(&types.ExtraSettings{}, nil) permissionsManager := permissions.NewManager(store) + peersManager := peers.NewManager(store, permissionsManager) ctx := context.Background() updateManager := update_channel.NewPeersUpdateManager(metrics) requestBuffer := NewAccountRequestBuffer(ctx, store) networkMapController := controller.NewController(ctx, store, metrics, updateManager, requestBuffer, MockIntegratedValidator{}, settingsMockManager, "netbird.selfhosted", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(store, peers.NewManager(store, permissionsManager)), &config.Config{}) - am, err := BuildManager(context.Background(), nil, store, networkMapController, nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) + am, err := BuildManager(context.Background(), nil, store, networkMapController, job.NewJobManager(nil, store, peersManager), nil, "", eventStore, nil, false, MockIntegratedValidator{}, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManager, false) if err != nil { return nil, nil, err } diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index 64f8f4711..5959d9946 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -44,14 +44,15 @@ import ( ) const ( - storeSqliteFileName = "store.db" - idQueryCondition = "id = ?" - keyQueryCondition = "key = ?" - mysqlKeyQueryCondition = "`key` = ?" - accountAndIDQueryCondition = "account_id = ? and id = ?" - accountAndIDsQueryCondition = "account_id = ? AND id IN ?" - accountIDCondition = "account_id = ?" - peerNotFoundFMT = "peer %s not found" + storeSqliteFileName = "store.db" + idQueryCondition = "id = ?" + keyQueryCondition = "key = ?" + mysqlKeyQueryCondition = "`key` = ?" + accountAndIDQueryCondition = "account_id = ? and id = ?" + accountAndPeerIDQueryCondition = "account_id = ? and peer_id = ?" + accountAndIDsQueryCondition = "account_id = ? AND id IN ?" + accountIDCondition = "account_id = ?" + peerNotFoundFMT = "peer %s not found" pgMaxConnections = 30 pgMinConnections = 1 @@ -126,7 +127,7 @@ func NewSqlStore(ctx context.Context, db *gorm.DB, storeEngine types.Engine, met &types.Account{}, &types.Policy{}, &types.PolicyRule{}, &route.Route{}, &nbdns.NameServerGroup{}, &installation{}, &types.ExtraSettings{}, &posture.Checks{}, &nbpeer.NetworkAddress{}, &networkTypes.Network{}, &routerTypes.NetworkRouter{}, &resourceTypes.NetworkResource{}, &types.AccountOnboarding{}, - &zones.Zone{}, &records.Record{}, &services.Service{}, + &types.Job{}, &zones.Zone{}, &records.Record{}, &types.UserInviteRecord{}, &services.Service{}, ) if err != nil { return nil, fmt.Errorf("auto migratePreAuto: %w", err) @@ -145,6 +146,97 @@ func GetKeyQueryCondition(s *SqlStore) string { return keyQueryCondition } +// SaveJob persists a job in DB +func (s *SqlStore) CreatePeerJob(ctx context.Context, job *types.Job) error { + result := s.db.Create(job) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to create job in store: %s", result.Error) + return status.Errorf(status.Internal, "failed to create job in store") + } + return nil +} + +func (s *SqlStore) CompletePeerJob(ctx context.Context, job *types.Job) error { + result := s.db. + Model(&types.Job{}). + Where(idQueryCondition, job.ID). + Updates(job) + + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to update job in store: %s", result.Error) + return status.Errorf(status.Internal, "failed to update job in store") + } + return nil +} + +// job was pending for too long and has been cancelled +func (s *SqlStore) MarkPendingJobsAsFailed(ctx context.Context, accountID, peerID, jobID, reason string) error { + now := time.Now().UTC() + result := s.db. + Model(&types.Job{}). + Where(accountAndPeerIDQueryCondition+" AND id = ?"+" AND status = ?", accountID, peerID, jobID, types.JobStatusPending). + Updates(types.Job{ + Status: types.JobStatusFailed, + FailedReason: reason, + CompletedAt: &now, + }) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to mark pending jobs as Failed job in store: %s", result.Error) + return status.Errorf(status.Internal, "failed to mark pending job as Failed in store") + } + return nil +} + +// job was pending for too long and has been cancelled +func (s *SqlStore) MarkAllPendingJobsAsFailed(ctx context.Context, accountID, peerID, reason string) error { + now := time.Now().UTC() + result := s.db. + Model(&types.Job{}). + Where(accountAndPeerIDQueryCondition+" AND status = ?", accountID, peerID, types.JobStatusPending). + Updates(types.Job{ + Status: types.JobStatusFailed, + FailedReason: reason, + CompletedAt: &now, + }) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to mark pending jobs as Failed job in store: %s", result.Error) + return status.Errorf(status.Internal, "failed to mark pending job as Failed in store") + } + return nil +} + +// GetJobByID fetches job by ID +func (s *SqlStore) GetPeerJobByID(ctx context.Context, accountID, jobID string) (*types.Job, error) { + var job types.Job + err := s.db. + Where(accountAndIDQueryCondition, accountID, jobID). + First(&job).Error + if errors.Is(err, gorm.ErrRecordNotFound) { + return nil, status.Errorf(status.NotFound, "job %s not found", jobID) + } + if err != nil { + log.WithContext(ctx).Errorf("failed to fetch job from store: %s", err) + return nil, err + } + return &job, nil +} + +// get all jobs +func (s *SqlStore) GetPeerJobs(ctx context.Context, accountID, peerID string) ([]*types.Job, error) { + var jobs []*types.Job + err := s.db. + Where(accountAndPeerIDQueryCondition, accountID, peerID). + Order("created_at DESC"). + Find(&jobs).Error + + if err != nil { + log.WithContext(ctx).Errorf("failed to fetch jobs from store: %s", err) + return nil, err + } + + return jobs, nil +} + // AcquireGlobalLock acquires global lock across all the accounts and returns a function that releases the lock func (s *SqlStore) AcquireGlobalLock(ctx context.Context) (unlock func()) { log.WithContext(ctx).Tracef("acquiring global lock") @@ -724,6 +816,130 @@ func (s *SqlStore) GetAccountOwner(ctx context.Context, lockStrength LockingStre return &user, nil } +// SaveUserInvite saves a user invite to the database +func (s *SqlStore) SaveUserInvite(ctx context.Context, invite *types.UserInviteRecord) error { + inviteCopy := invite.Copy() + if err := inviteCopy.EncryptSensitiveData(s.fieldEncrypt); err != nil { + return fmt.Errorf("encrypt invite: %w", err) + } + + result := s.db.Save(inviteCopy) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to save user invite to store: %s", result.Error) + return status.Errorf(status.Internal, "failed to save user invite to store") + } + return nil +} + +// GetUserInviteByID retrieves a user invite by its ID and account ID +func (s *SqlStore) GetUserInviteByID(ctx context.Context, lockStrength LockingStrength, accountID, inviteID string) (*types.UserInviteRecord, error) { + tx := s.db + if lockStrength != LockingStrengthNone { + tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)}) + } + + var invite types.UserInviteRecord + result := tx.Where("account_id = ?", accountID).Take(&invite, idQueryCondition, inviteID) + if result.Error != nil { + if errors.Is(result.Error, gorm.ErrRecordNotFound) { + return nil, status.Errorf(status.NotFound, "user invite not found") + } + log.WithContext(ctx).Errorf("failed to get user invite from store: %s", result.Error) + return nil, status.Errorf(status.Internal, "failed to get user invite from store") + } + + if err := invite.DecryptSensitiveData(s.fieldEncrypt); err != nil { + return nil, fmt.Errorf("decrypt invite: %w", err) + } + + return &invite, nil +} + +// GetUserInviteByHashedToken retrieves a user invite by its hashed token +func (s *SqlStore) GetUserInviteByHashedToken(ctx context.Context, lockStrength LockingStrength, hashedToken string) (*types.UserInviteRecord, error) { + tx := s.db + if lockStrength != LockingStrengthNone { + tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)}) + } + + var invite types.UserInviteRecord + result := tx.Take(&invite, "hashed_token = ?", hashedToken) + if result.Error != nil { + if errors.Is(result.Error, gorm.ErrRecordNotFound) { + return nil, status.Errorf(status.NotFound, "user invite not found") + } + log.WithContext(ctx).Errorf("failed to get user invite from store: %s", result.Error) + return nil, status.Errorf(status.Internal, "failed to get user invite from store") + } + + if err := invite.DecryptSensitiveData(s.fieldEncrypt); err != nil { + return nil, fmt.Errorf("decrypt invite: %w", err) + } + + return &invite, nil +} + +// GetUserInviteByEmail retrieves a user invite by account ID and email. +// Since email is encrypted with random IVs, we fetch all invites for the account +// and compare emails in memory after decryption. +func (s *SqlStore) GetUserInviteByEmail(ctx context.Context, lockStrength LockingStrength, accountID, email string) (*types.UserInviteRecord, error) { + tx := s.db + if lockStrength != LockingStrengthNone { + tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)}) + } + + var invites []*types.UserInviteRecord + result := tx.Find(&invites, "account_id = ?", accountID) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to get user invites from store: %s", result.Error) + return nil, status.Errorf(status.Internal, "failed to get user invites from store") + } + + for _, invite := range invites { + if err := invite.DecryptSensitiveData(s.fieldEncrypt); err != nil { + return nil, fmt.Errorf("decrypt invite: %w", err) + } + if strings.EqualFold(invite.Email, email) { + return invite, nil + } + } + + return nil, status.Errorf(status.NotFound, "user invite not found for email") +} + +// GetAccountUserInvites retrieves all user invites for an account +func (s *SqlStore) GetAccountUserInvites(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*types.UserInviteRecord, error) { + tx := s.db + if lockStrength != LockingStrengthNone { + tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)}) + } + + var invites []*types.UserInviteRecord + result := tx.Find(&invites, "account_id = ?", accountID) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to get user invites from store: %s", result.Error) + return nil, status.Errorf(status.Internal, "failed to get user invites from store") + } + + for _, invite := range invites { + if err := invite.DecryptSensitiveData(s.fieldEncrypt); err != nil { + return nil, fmt.Errorf("decrypt invite: %w", err) + } + } + + return invites, nil +} + +// DeleteUserInvite deletes a user invite by its ID +func (s *SqlStore) DeleteUserInvite(ctx context.Context, inviteID string) error { + result := s.db.Delete(&types.UserInviteRecord{}, idQueryCondition, inviteID) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to delete user invite from store: %s", result.Error) + return status.Errorf(status.Internal, "failed to delete user invite from store") + } + return nil +} + func (s *SqlStore) GetAccountGroups(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*types.Group, error) { tx := s.db if lockStrength != LockingStrengthNone { @@ -4178,6 +4394,9 @@ func (s *SqlStore) GetUserIDByPeerKey(ctx context.Context, lockStrength LockingS Take(&userID, GetKeyQueryCondition(s), peerKey) if result.Error != nil { + if errors.Is(result.Error, gorm.ErrRecordNotFound) { + return "", status.Errorf(status.NotFound, "peer not found: index lookup failed") + } return "", status.Errorf(status.Internal, "failed to get user ID by peer key") } @@ -4365,6 +4584,26 @@ func (s *SqlStore) DeleteZoneDNSRecords(ctx context.Context, accountID, zoneID s return nil } +func (s *SqlStore) GetPeerIDByKey(ctx context.Context, lockStrength LockingStrength, key string) (string, error) { + tx := s.db + if lockStrength != LockingStrengthNone { + tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)}) + } + + var peerID string + result := tx.Model(&nbpeer.Peer{}). + Select("id"). + Where(GetKeyQueryCondition(s), key). + Limit(1). + Scan(&peerID) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to get peer ID by key: %s", result.Error) + return "", status.Errorf(status.Internal, "failed to get peer ID by key") + } + + return peerID, nil +} + func (s *SqlStore) CreateService(ctx context.Context, service *services.Service) error { result := s.db.Create(service) if result.Error != nil { diff --git a/management/server/store/sql_store_user_invite_test.go b/management/server/store/sql_store_user_invite_test.go new file mode 100644 index 000000000..fb6934a2e --- /dev/null +++ b/management/server/store/sql_store_user_invite_test.go @@ -0,0 +1,520 @@ +package store + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/management/server/types" +) + +func TestSqlStore_SaveUserInvite(t *testing.T) { + runTestForAllEngines(t, "", func(t *testing.T, store Store) { + if store == nil { + t.Skip("store is nil") + } + ctx := context.Background() + + invite := &types.UserInviteRecord{ + ID: "invite-1", + AccountID: "account-1", + Email: "test@example.com", + Name: "Test User", + Role: "user", + AutoGroups: []string{"group-1", "group-2"}, + HashedToken: "hashed-token-123", + ExpiresAt: time.Now().Add(72 * time.Hour), + CreatedAt: time.Now(), + CreatedBy: "admin-user", + } + + err := store.SaveUserInvite(ctx, invite) + require.NoError(t, err) + + // Verify the invite was saved + retrieved, err := store.GetUserInviteByID(ctx, LockingStrengthNone, invite.AccountID, invite.ID) + require.NoError(t, err) + assert.Equal(t, invite.ID, retrieved.ID) + assert.Equal(t, invite.Email, retrieved.Email) + assert.Equal(t, invite.Name, retrieved.Name) + assert.Equal(t, invite.Role, retrieved.Role) + assert.Equal(t, invite.AutoGroups, retrieved.AutoGroups) + assert.Equal(t, invite.CreatedBy, retrieved.CreatedBy) + }) +} + +func TestSqlStore_SaveUserInvite_Update(t *testing.T) { + runTestForAllEngines(t, "", func(t *testing.T, store Store) { + if store == nil { + t.Skip("store is nil") + } + ctx := context.Background() + + invite := &types.UserInviteRecord{ + ID: "invite-update", + AccountID: "account-1", + Email: "test@example.com", + Name: "Test User", + Role: "user", + AutoGroups: []string{"group-1"}, + HashedToken: "hashed-token-123", + ExpiresAt: time.Now().Add(72 * time.Hour), + CreatedAt: time.Now(), + CreatedBy: "admin-user", + } + + err := store.SaveUserInvite(ctx, invite) + require.NoError(t, err) + + // Update the invite with a new token + invite.HashedToken = "new-hashed-token" + invite.ExpiresAt = time.Now().Add(24 * time.Hour) + + err = store.SaveUserInvite(ctx, invite) + require.NoError(t, err) + + // Verify the update + retrieved, err := store.GetUserInviteByID(ctx, LockingStrengthNone, invite.AccountID, invite.ID) + require.NoError(t, err) + assert.Equal(t, "new-hashed-token", retrieved.HashedToken) + }) +} + +func TestSqlStore_GetUserInviteByID(t *testing.T) { + runTestForAllEngines(t, "", func(t *testing.T, store Store) { + if store == nil { + t.Skip("store is nil") + } + ctx := context.Background() + + invite := &types.UserInviteRecord{ + ID: "invite-get-by-id", + AccountID: "account-1", + Email: "getbyid@example.com", + Name: "Get By ID User", + Role: "admin", + AutoGroups: []string{}, + HashedToken: "hashed-token-get", + ExpiresAt: time.Now().Add(72 * time.Hour), + CreatedAt: time.Now(), + CreatedBy: "admin-user", + } + + err := store.SaveUserInvite(ctx, invite) + require.NoError(t, err) + + // Get by ID - success + retrieved, err := store.GetUserInviteByID(ctx, LockingStrengthNone, invite.AccountID, invite.ID) + require.NoError(t, err) + assert.Equal(t, invite.ID, retrieved.ID) + assert.Equal(t, invite.Email, retrieved.Email) + + // Get by ID - wrong account + _, err = store.GetUserInviteByID(ctx, LockingStrengthNone, "wrong-account", invite.ID) + assert.Error(t, err) + + // Get by ID - not found + _, err = store.GetUserInviteByID(ctx, LockingStrengthNone, invite.AccountID, "non-existent") + assert.Error(t, err) + }) +} + +func TestSqlStore_GetUserInviteByHashedToken(t *testing.T) { + runTestForAllEngines(t, "", func(t *testing.T, store Store) { + if store == nil { + t.Skip("store is nil") + } + ctx := context.Background() + + invite := &types.UserInviteRecord{ + ID: "invite-get-by-token", + AccountID: "account-1", + Email: "getbytoken@example.com", + Name: "Get By Token User", + Role: "user", + AutoGroups: []string{"group-1"}, + HashedToken: "unique-hashed-token-456", + ExpiresAt: time.Now().Add(72 * time.Hour), + CreatedAt: time.Now(), + CreatedBy: "admin-user", + } + + err := store.SaveUserInvite(ctx, invite) + require.NoError(t, err) + + // Get by hashed token - success + retrieved, err := store.GetUserInviteByHashedToken(ctx, LockingStrengthNone, invite.HashedToken) + require.NoError(t, err) + assert.Equal(t, invite.ID, retrieved.ID) + assert.Equal(t, invite.Email, retrieved.Email) + + // Get by hashed token - not found + _, err = store.GetUserInviteByHashedToken(ctx, LockingStrengthNone, "non-existent-token") + assert.Error(t, err) + }) +} + +func TestSqlStore_GetUserInviteByEmail(t *testing.T) { + runTestForAllEngines(t, "", func(t *testing.T, store Store) { + if store == nil { + t.Skip("store is nil") + } + ctx := context.Background() + + invite := &types.UserInviteRecord{ + ID: "invite-get-by-email", + AccountID: "account-email-test", + Email: "unique-email@example.com", + Name: "Get By Email User", + Role: "user", + AutoGroups: []string{}, + HashedToken: "hashed-token-email", + ExpiresAt: time.Now().Add(72 * time.Hour), + CreatedAt: time.Now(), + CreatedBy: "admin-user", + } + + err := store.SaveUserInvite(ctx, invite) + require.NoError(t, err) + + // Get by email - success + retrieved, err := store.GetUserInviteByEmail(ctx, LockingStrengthNone, invite.AccountID, invite.Email) + require.NoError(t, err) + assert.Equal(t, invite.ID, retrieved.ID) + + // Get by email - case insensitive + retrieved, err = store.GetUserInviteByEmail(ctx, LockingStrengthNone, invite.AccountID, "UNIQUE-EMAIL@EXAMPLE.COM") + require.NoError(t, err) + assert.Equal(t, invite.ID, retrieved.ID) + + // Get by email - wrong account + _, err = store.GetUserInviteByEmail(ctx, LockingStrengthNone, "wrong-account", invite.Email) + assert.Error(t, err) + + // Get by email - not found + _, err = store.GetUserInviteByEmail(ctx, LockingStrengthNone, invite.AccountID, "nonexistent@example.com") + assert.Error(t, err) + }) +} + +func TestSqlStore_GetAccountUserInvites(t *testing.T) { + runTestForAllEngines(t, "", func(t *testing.T, store Store) { + if store == nil { + t.Skip("store is nil") + } + ctx := context.Background() + + accountID := "account-list-invites" + + invites := []*types.UserInviteRecord{ + { + ID: "invite-list-1", + AccountID: accountID, + Email: "user1@example.com", + Name: "User One", + Role: "user", + AutoGroups: []string{"group-1"}, + HashedToken: "hashed-token-list-1", + ExpiresAt: time.Now().Add(72 * time.Hour), + CreatedAt: time.Now(), + CreatedBy: "admin-user", + }, + { + ID: "invite-list-2", + AccountID: accountID, + Email: "user2@example.com", + Name: "User Two", + Role: "admin", + AutoGroups: []string{"group-2"}, + HashedToken: "hashed-token-list-2", + ExpiresAt: time.Now().Add(24 * time.Hour), + CreatedAt: time.Now(), + CreatedBy: "admin-user", + }, + { + ID: "invite-list-3", + AccountID: "different-account", + Email: "user3@example.com", + Name: "User Three", + Role: "user", + AutoGroups: []string{}, + HashedToken: "hashed-token-list-3", + ExpiresAt: time.Now().Add(72 * time.Hour), + CreatedAt: time.Now(), + CreatedBy: "admin-user", + }, + } + + for _, invite := range invites { + err := store.SaveUserInvite(ctx, invite) + require.NoError(t, err) + } + + // Get all invites for the account + retrieved, err := store.GetAccountUserInvites(ctx, LockingStrengthNone, accountID) + require.NoError(t, err) + assert.Len(t, retrieved, 2) + + // Verify the invites belong to the correct account + for _, invite := range retrieved { + assert.Equal(t, accountID, invite.AccountID) + } + + // Get invites for account with no invites + retrieved, err = store.GetAccountUserInvites(ctx, LockingStrengthNone, "empty-account") + require.NoError(t, err) + assert.Len(t, retrieved, 0) + }) +} + +func TestSqlStore_DeleteUserInvite(t *testing.T) { + runTestForAllEngines(t, "", func(t *testing.T, store Store) { + if store == nil { + t.Skip("store is nil") + } + ctx := context.Background() + + invite := &types.UserInviteRecord{ + ID: "invite-delete", + AccountID: "account-delete-test", + Email: "delete@example.com", + Name: "Delete User", + Role: "user", + AutoGroups: []string{}, + HashedToken: "hashed-token-delete", + ExpiresAt: time.Now().Add(72 * time.Hour), + CreatedAt: time.Now(), + CreatedBy: "admin-user", + } + + err := store.SaveUserInvite(ctx, invite) + require.NoError(t, err) + + // Verify invite exists + _, err = store.GetUserInviteByID(ctx, LockingStrengthNone, invite.AccountID, invite.ID) + require.NoError(t, err) + + // Delete the invite + err = store.DeleteUserInvite(ctx, invite.ID) + require.NoError(t, err) + + // Verify invite is deleted + _, err = store.GetUserInviteByID(ctx, LockingStrengthNone, invite.AccountID, invite.ID) + assert.Error(t, err) + }) +} + +func TestSqlStore_UserInvite_EncryptedFields(t *testing.T) { + runTestForAllEngines(t, "", func(t *testing.T, store Store) { + if store == nil { + t.Skip("store is nil") + } + ctx := context.Background() + + invite := &types.UserInviteRecord{ + ID: "invite-encrypted", + AccountID: "account-encrypted", + Email: "sensitive-email@example.com", + Name: "Sensitive Name", + Role: "user", + AutoGroups: []string{"group-1"}, + HashedToken: "hashed-token-encrypted", + ExpiresAt: time.Now().Add(72 * time.Hour), + CreatedAt: time.Now(), + CreatedBy: "admin-user", + } + + err := store.SaveUserInvite(ctx, invite) + require.NoError(t, err) + + // Retrieve and verify decryption works + retrieved, err := store.GetUserInviteByID(ctx, LockingStrengthNone, invite.AccountID, invite.ID) + require.NoError(t, err) + assert.Equal(t, "sensitive-email@example.com", retrieved.Email) + assert.Equal(t, "Sensitive Name", retrieved.Name) + }) +} + +func TestSqlStore_DeleteUserInvite_NonExistent(t *testing.T) { + runTestForAllEngines(t, "", func(t *testing.T, store Store) { + if store == nil { + t.Skip("store is nil") + } + ctx := context.Background() + + // Deleting a non-existent invite should not return an error + err := store.DeleteUserInvite(ctx, "non-existent-invite-id") + require.NoError(t, err) + }) +} + +func TestSqlStore_UserInvite_SameEmailDifferentAccounts(t *testing.T) { + runTestForAllEngines(t, "", func(t *testing.T, store Store) { + if store == nil { + t.Skip("store is nil") + } + ctx := context.Background() + + email := "shared-email@example.com" + + // Create invite in first account + invite1 := &types.UserInviteRecord{ + ID: "invite-account1", + AccountID: "account-1", + Email: email, + Name: "User Account 1", + Role: "user", + AutoGroups: []string{}, + HashedToken: "hashed-token-account1", + ExpiresAt: time.Now().Add(72 * time.Hour), + CreatedAt: time.Now(), + CreatedBy: "admin-1", + } + + // Create invite in second account with same email + invite2 := &types.UserInviteRecord{ + ID: "invite-account2", + AccountID: "account-2", + Email: email, + Name: "User Account 2", + Role: "admin", + AutoGroups: []string{"group-1"}, + HashedToken: "hashed-token-account2", + ExpiresAt: time.Now().Add(72 * time.Hour), + CreatedAt: time.Now(), + CreatedBy: "admin-2", + } + + err := store.SaveUserInvite(ctx, invite1) + require.NoError(t, err) + + err = store.SaveUserInvite(ctx, invite2) + require.NoError(t, err) + + // Verify each account gets the correct invite by email + retrieved1, err := store.GetUserInviteByEmail(ctx, LockingStrengthNone, "account-1", email) + require.NoError(t, err) + assert.Equal(t, "invite-account1", retrieved1.ID) + assert.Equal(t, "User Account 1", retrieved1.Name) + + retrieved2, err := store.GetUserInviteByEmail(ctx, LockingStrengthNone, "account-2", email) + require.NoError(t, err) + assert.Equal(t, "invite-account2", retrieved2.ID) + assert.Equal(t, "User Account 2", retrieved2.Name) + }) +} + +func TestSqlStore_UserInvite_LockingStrength(t *testing.T) { + runTestForAllEngines(t, "", func(t *testing.T, store Store) { + if store == nil { + t.Skip("store is nil") + } + ctx := context.Background() + + invite := &types.UserInviteRecord{ + ID: "invite-locking", + AccountID: "account-locking", + Email: "locking@example.com", + Name: "Locking Test User", + Role: "user", + AutoGroups: []string{}, + HashedToken: "hashed-token-locking", + ExpiresAt: time.Now().Add(72 * time.Hour), + CreatedAt: time.Now(), + CreatedBy: "admin-user", + } + + err := store.SaveUserInvite(ctx, invite) + require.NoError(t, err) + + // Test with different locking strengths + lockStrengths := []LockingStrength{LockingStrengthNone, LockingStrengthShare, LockingStrengthUpdate} + + for _, strength := range lockStrengths { + retrieved, err := store.GetUserInviteByID(ctx, strength, invite.AccountID, invite.ID) + require.NoError(t, err) + assert.Equal(t, invite.ID, retrieved.ID) + + retrieved, err = store.GetUserInviteByHashedToken(ctx, strength, invite.HashedToken) + require.NoError(t, err) + assert.Equal(t, invite.ID, retrieved.ID) + + retrieved, err = store.GetUserInviteByEmail(ctx, strength, invite.AccountID, invite.Email) + require.NoError(t, err) + assert.Equal(t, invite.ID, retrieved.ID) + + invites, err := store.GetAccountUserInvites(ctx, strength, invite.AccountID) + require.NoError(t, err) + assert.Len(t, invites, 1) + } + }) +} + +func TestSqlStore_UserInvite_EmptyAutoGroups(t *testing.T) { + runTestForAllEngines(t, "", func(t *testing.T, store Store) { + if store == nil { + t.Skip("store is nil") + } + ctx := context.Background() + + // Test with nil AutoGroups + invite := &types.UserInviteRecord{ + ID: "invite-nil-autogroups", + AccountID: "account-autogroups", + Email: "nilgroups@example.com", + Name: "Nil Groups User", + Role: "user", + AutoGroups: nil, + HashedToken: "hashed-token-nil", + ExpiresAt: time.Now().Add(72 * time.Hour), + CreatedAt: time.Now(), + CreatedBy: "admin-user", + } + + err := store.SaveUserInvite(ctx, invite) + require.NoError(t, err) + + retrieved, err := store.GetUserInviteByID(ctx, LockingStrengthNone, invite.AccountID, invite.ID) + require.NoError(t, err) + // Should return empty slice or nil, both are acceptable + assert.Empty(t, retrieved.AutoGroups) + }) +} + +func TestSqlStore_UserInvite_TimestampPrecision(t *testing.T) { + runTestForAllEngines(t, "", func(t *testing.T, store Store) { + if store == nil { + t.Skip("store is nil") + } + ctx := context.Background() + + now := time.Now().UTC().Truncate(time.Millisecond) + expiresAt := now.Add(72 * time.Hour) + + invite := &types.UserInviteRecord{ + ID: "invite-timestamp", + AccountID: "account-timestamp", + Email: "timestamp@example.com", + Name: "Timestamp User", + Role: "user", + AutoGroups: []string{}, + HashedToken: "hashed-token-timestamp", + ExpiresAt: expiresAt, + CreatedAt: now, + CreatedBy: "admin-user", + } + + err := store.SaveUserInvite(ctx, invite) + require.NoError(t, err) + + retrieved, err := store.GetUserInviteByID(ctx, LockingStrengthNone, invite.AccountID, invite.ID) + require.NoError(t, err) + + // Verify timestamps are preserved (within reasonable precision) + assert.WithinDuration(t, now, retrieved.CreatedAt, time.Second) + assert.WithinDuration(t, expiresAt, retrieved.ExpiresAt, time.Second) + }) +} diff --git a/management/server/store/store.go b/management/server/store/store.go index 33d529740..f102f7793 100644 --- a/management/server/store/store.go +++ b/management/server/store/store.go @@ -93,6 +93,13 @@ type Store interface { DeleteHashedPAT2TokenIDIndex(hashedToken string) error DeleteTokenID2UserIDIndex(tokenID string) error + SaveUserInvite(ctx context.Context, invite *types.UserInviteRecord) error + GetUserInviteByID(ctx context.Context, lockStrength LockingStrength, accountID, inviteID string) (*types.UserInviteRecord, error) + GetUserInviteByHashedToken(ctx context.Context, lockStrength LockingStrength, hashedToken string) (*types.UserInviteRecord, error) + GetUserInviteByEmail(ctx context.Context, lockStrength LockingStrength, accountID, email string) (*types.UserInviteRecord, error) + GetAccountUserInvites(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*types.UserInviteRecord, error) + DeleteUserInvite(ctx context.Context, inviteID string) error + GetPATByID(ctx context.Context, lockStrength LockingStrength, userID, patID string) (*types.PersonalAccessToken, error) GetUserPATs(ctx context.Context, lockStrength LockingStrength, userID string) ([]*types.PersonalAccessToken, error) GetPATByHashedToken(ctx context.Context, lockStrength LockingStrength, hashedToken string) (*types.PersonalAccessToken, error) @@ -227,6 +234,13 @@ type Store interface { GetZoneDNSRecords(ctx context.Context, lockStrength LockingStrength, accountID, zoneID string) ([]*records.Record, error) GetZoneDNSRecordsByName(ctx context.Context, lockStrength LockingStrength, accountID, zoneID, name string) ([]*records.Record, error) DeleteZoneDNSRecords(ctx context.Context, accountID, zoneID string) error + CreatePeerJob(ctx context.Context, job *types.Job) error + CompletePeerJob(ctx context.Context, job *types.Job) error + GetPeerJobByID(ctx context.Context, accountID, jobID string) (*types.Job, error) + GetPeerJobs(ctx context.Context, accountID, peerID string) ([]*types.Job, error) + MarkPendingJobsAsFailed(ctx context.Context, accountID, peerID, jobID, reason string) error + MarkAllPendingJobsAsFailed(ctx context.Context, accountID, peerID, reason string) error + GetPeerIDByKey(ctx context.Context, lockStrength LockingStrength, key string) (string, error) CreateService(ctx context.Context, service *services.Service) error UpdateService(ctx context.Context, service *services.Service) error diff --git a/management/server/types/job.go b/management/server/types/job.go new file mode 100644 index 000000000..bad8f00ba --- /dev/null +++ b/management/server/types/job.go @@ -0,0 +1,228 @@ +package types + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/google/uuid" + + "github.com/netbirdio/netbird/shared/management/http/api" + "github.com/netbirdio/netbird/shared/management/proto" + "github.com/netbirdio/netbird/shared/management/status" +) + +type JobStatus string + +const ( + JobStatusPending JobStatus = "pending" + JobStatusSucceeded JobStatus = "succeeded" + JobStatusFailed JobStatus = "failed" +) + +type JobType string + +const ( + JobTypeBundle JobType = "bundle" +) + +const ( + // MaxJobReasonLength is the maximum length allowed for job failure reasons + MaxJobReasonLength = 4096 +) + +type Job struct { + // ID is the primary identifier + ID string `gorm:"primaryKey"` + + // CreatedAt when job was created (UTC) + CreatedAt time.Time `gorm:"autoCreateTime"` + + // CompletedAt when job finished, null if still running + CompletedAt *time.Time + + // TriggeredBy user that triggered this job + TriggeredBy string `gorm:"index"` + + PeerID string `gorm:"index"` + + AccountID string `gorm:"index"` + + // Status of the job: pending, succeeded, failed + Status JobStatus `gorm:"index;type:varchar(50)"` + + // FailedReason describes why the job failed (if failed) + FailedReason string + + Workload Workload `gorm:"embedded;embeddedPrefix:workload_"` +} + +type Workload struct { + Type JobType `gorm:"column:workload_type;index;type:varchar(50)"` + Parameters json.RawMessage `gorm:"type:json"` + Result json.RawMessage `gorm:"type:json"` +} + +// NewJob creates a new job with default fields and validation +func NewJob(triggeredBy, accountID, peerID string, req *api.JobRequest) (*Job, error) { + if req == nil { + return nil, status.Errorf(status.BadRequest, "job request cannot be nil") + } + + // Determine job type + jobTypeStr, err := req.Workload.Discriminator() + if err != nil { + return nil, status.Errorf(status.BadRequest, "could not determine job type: %v", err) + } + jobType := JobType(jobTypeStr) + + if jobType == "" { + return nil, status.Errorf(status.BadRequest, "job type is required") + } + + var workload Workload + + switch jobType { + case JobTypeBundle: + if err := validateAndBuildBundleParams(req.Workload, &workload); err != nil { + return nil, status.Errorf(status.BadRequest, "%v", err) + } + default: + return nil, status.Errorf(status.BadRequest, "unsupported job type: %s", jobType) + } + + return &Job{ + ID: uuid.New().String(), + TriggeredBy: triggeredBy, + PeerID: peerID, + AccountID: accountID, + Status: JobStatusPending, + CreatedAt: time.Now().UTC(), + Workload: workload, + }, nil +} + +func (j *Job) BuildWorkloadResponse() (*api.WorkloadResponse, error) { + var wl api.WorkloadResponse + + switch j.Workload.Type { + case JobTypeBundle: + if err := j.buildBundleResponse(&wl); err != nil { + return nil, status.Errorf(status.Internal, "failed to process job: %v", err.Error()) + } + return &wl, nil + + default: + return nil, status.Errorf(status.InvalidArgument, "unknown job type: %v", j.Workload.Type) + } +} + +func (j *Job) buildBundleResponse(wl *api.WorkloadResponse) error { + var p api.BundleParameters + if err := json.Unmarshal(j.Workload.Parameters, &p); err != nil { + return fmt.Errorf("invalid parameters for bundle job: %w", err) + } + var r api.BundleResult + if err := json.Unmarshal(j.Workload.Result, &r); err != nil { + return fmt.Errorf("invalid result for bundle job: %w", err) + } + + if err := wl.FromBundleWorkloadResponse(api.BundleWorkloadResponse{ + Type: api.WorkloadTypeBundle, + Parameters: p, + Result: r, + }); err != nil { + return fmt.Errorf("unknown job parameters: %v", err) + } + return nil +} + +func validateAndBuildBundleParams(req api.WorkloadRequest, workload *Workload) error { + bundle, err := req.AsBundleWorkloadRequest() + if err != nil { + return fmt.Errorf("invalid parameters for bundle job") + } + // validate bundle_for_time <= 5 minutes if BundleFor is enabled + if bundle.Parameters.BundleFor && (bundle.Parameters.BundleForTime < 1 || bundle.Parameters.BundleForTime > 5) { + return fmt.Errorf("bundle_for_time must be between 1 and 5, got %d", bundle.Parameters.BundleForTime) + } + // validate log-file-count ≥ 1 and ≤ 1000 + if bundle.Parameters.LogFileCount < 1 || bundle.Parameters.LogFileCount > 1000 { + return fmt.Errorf("log-file-count must be between 1 and 1000, got %d", bundle.Parameters.LogFileCount) + } + + workload.Parameters, err = json.Marshal(bundle.Parameters) + if err != nil { + return fmt.Errorf("failed to marshal workload parameters: %w", err) + } + workload.Result = []byte("{}") + workload.Type = JobType(api.WorkloadTypeBundle) + + return nil +} + +// ApplyResponse validates and maps a proto.JobResponse into the Job fields. +func (j *Job) ApplyResponse(resp *proto.JobResponse) error { + if resp == nil { + return nil + } + + j.ID = string(resp.ID) + now := time.Now().UTC() + j.CompletedAt = &now + switch resp.Status { + case proto.JobStatus_succeeded: + j.Status = JobStatusSucceeded + case proto.JobStatus_failed: + j.Status = JobStatusFailed + if len(resp.Reason) > 0 { + reason := string(resp.Reason) + if len(resp.Reason) > MaxJobReasonLength { + reason = string(resp.Reason[:MaxJobReasonLength]) + "... (truncated)" + } + j.FailedReason = fmt.Sprintf("Client error: '%s'", reason) + } + return nil + default: + return fmt.Errorf("unexpected job status: %v", resp.Status) + } + + // Handle workload results (oneof) + var err error + switch r := resp.WorkloadResults.(type) { + case *proto.JobResponse_Bundle: + if j.Workload.Result, err = json.Marshal(r.Bundle); err != nil { + return fmt.Errorf("failed to marshal workload results: %w", err) + } + default: + return fmt.Errorf("unsupported workload response type: %T", r) + } + return nil +} + +func (j *Job) ToStreamJobRequest() (*proto.JobRequest, error) { + switch j.Workload.Type { + case JobTypeBundle: + return j.buildStreamBundleResponse() + default: + return nil, status.Errorf(status.InvalidArgument, "unknown job type: %v", j.Workload.Type) + } +} + +func (j *Job) buildStreamBundleResponse() (*proto.JobRequest, error) { + var p api.BundleParameters + if err := json.Unmarshal(j.Workload.Parameters, &p); err != nil { + return nil, fmt.Errorf("invalid parameters for bundle job: %w", err) + } + return &proto.JobRequest{ + ID: []byte(j.ID), + WorkloadParameters: &proto.JobRequest_Bundle{ + Bundle: &proto.BundleParameters{ + BundleFor: p.BundleFor, + BundleForTime: int64(p.BundleForTime), + LogFileCount: int32(p.LogFileCount), + Anonymize: p.Anonymize, + }, + }, + }, nil +} diff --git a/management/server/types/user_invite.go b/management/server/types/user_invite.go new file mode 100644 index 000000000..1544b0ff3 --- /dev/null +++ b/management/server/types/user_invite.go @@ -0,0 +1,201 @@ +package types + +import ( + "crypto/sha256" + b64 "encoding/base64" + "fmt" + "hash/crc32" + "strings" + "time" + + b "github.com/hashicorp/go-secure-stdlib/base62" + "github.com/rs/xid" + + "github.com/netbirdio/netbird/base62" + "github.com/netbirdio/netbird/util/crypt" +) + +const ( + // InviteTokenPrefix is the prefix for invite tokens + InviteTokenPrefix = "nbi_" + // InviteTokenSecretLength is the length of the random secret part + InviteTokenSecretLength = 30 + // InviteTokenChecksumLength is the length of the encoded checksum + InviteTokenChecksumLength = 6 + // InviteTokenLength is the total length of the token (4 + 30 + 6 = 40) + InviteTokenLength = 40 + // DefaultInviteExpirationSeconds is the default expiration time for invites (72 hours) + DefaultInviteExpirationSeconds = 259200 + // MinInviteExpirationSeconds is the minimum expiration time for invites (1 hour) + MinInviteExpirationSeconds = 3600 +) + +// UserInviteRecord represents an invitation for a user to set up their account (database model) +type UserInviteRecord struct { + ID string `gorm:"primaryKey"` + AccountID string `gorm:"index;not null"` + Email string `gorm:"index;not null"` + Name string `gorm:"not null"` + Role string `gorm:"not null"` + AutoGroups []string `gorm:"serializer:json"` + HashedToken string `gorm:"index;not null"` // SHA-256 hash of the token (base64 encoded) + ExpiresAt time.Time `gorm:"not null"` + CreatedAt time.Time `gorm:"not null"` + CreatedBy string `gorm:"not null"` +} + +// TableName returns the table name for GORM +func (UserInviteRecord) TableName() string { + return "user_invites" +} + +// GenerateInviteToken creates a new invite token with the format: nbi_ +// Returns the hashed token (for storage) and the plain token (to give to the user) +func GenerateInviteToken() (hashedToken string, plainToken string, err error) { + secret, err := b.Random(InviteTokenSecretLength) + if err != nil { + return "", "", fmt.Errorf("failed to generate random secret: %w", err) + } + + checksum := crc32.ChecksumIEEE([]byte(secret)) + encodedChecksum := base62.Encode(checksum) + // Left-pad with '0' to ensure exactly 6 characters (fmt.Sprintf %s pads with spaces which breaks base62.Decode) + paddedChecksum := encodedChecksum + if len(paddedChecksum) < InviteTokenChecksumLength { + paddedChecksum = strings.Repeat("0", InviteTokenChecksumLength-len(paddedChecksum)) + paddedChecksum + } + + plainToken = InviteTokenPrefix + secret + paddedChecksum + hash := sha256.Sum256([]byte(plainToken)) + hashedToken = b64.StdEncoding.EncodeToString(hash[:]) + + return hashedToken, plainToken, nil +} + +// HashInviteToken creates a SHA-256 hash of the token (base64 encoded) +func HashInviteToken(token string) string { + hash := sha256.Sum256([]byte(token)) + return b64.StdEncoding.EncodeToString(hash[:]) +} + +// ValidateInviteToken validates the token format and checksum. +// Returns an error if the token is invalid. +func ValidateInviteToken(token string) error { + if len(token) != InviteTokenLength { + return fmt.Errorf("invalid token length") + } + + prefix := token[:len(InviteTokenPrefix)] + if prefix != InviteTokenPrefix { + return fmt.Errorf("invalid token prefix") + } + + secret := token[len(InviteTokenPrefix) : len(InviteTokenPrefix)+InviteTokenSecretLength] + encodedChecksum := token[len(InviteTokenPrefix)+InviteTokenSecretLength:] + + verificationChecksum, err := base62.Decode(encodedChecksum) + if err != nil { + return fmt.Errorf("checksum decoding failed: %w", err) + } + + secretChecksum := crc32.ChecksumIEEE([]byte(secret)) + if secretChecksum != verificationChecksum { + return fmt.Errorf("checksum does not match") + } + + return nil +} + +// IsExpired checks if the invite has expired +func (i *UserInviteRecord) IsExpired() bool { + return time.Now().After(i.ExpiresAt) +} + +// UserInvite contains the result of creating or regenerating an invite +type UserInvite struct { + UserInfo *UserInfo + InviteToken string + InviteExpiresAt time.Time + InviteCreatedAt time.Time +} + +// UserInviteInfo contains public information about an invite (for unauthenticated endpoint) +type UserInviteInfo struct { + Email string `json:"email"` + Name string `json:"name"` + ExpiresAt time.Time `json:"expires_at"` + Valid bool `json:"valid"` + InvitedBy string `json:"invited_by"` +} + +// NewInviteID generates a new invite ID using xid +func NewInviteID() string { + return xid.New().String() +} + +// EncryptSensitiveData encrypts the invite's sensitive fields (Email and Name) in place. +func (i *UserInviteRecord) EncryptSensitiveData(enc *crypt.FieldEncrypt) error { + if enc == nil { + return nil + } + + var err error + if i.Email != "" { + i.Email, err = enc.Encrypt(i.Email) + if err != nil { + return fmt.Errorf("encrypt email: %w", err) + } + } + + if i.Name != "" { + i.Name, err = enc.Encrypt(i.Name) + if err != nil { + return fmt.Errorf("encrypt name: %w", err) + } + } + + return nil +} + +// DecryptSensitiveData decrypts the invite's sensitive fields (Email and Name) in place. +func (i *UserInviteRecord) DecryptSensitiveData(enc *crypt.FieldEncrypt) error { + if enc == nil { + return nil + } + + var err error + if i.Email != "" { + i.Email, err = enc.Decrypt(i.Email) + if err != nil { + return fmt.Errorf("decrypt email: %w", err) + } + } + + if i.Name != "" { + i.Name, err = enc.Decrypt(i.Name) + if err != nil { + return fmt.Errorf("decrypt name: %w", err) + } + } + + return nil +} + +// Copy creates a deep copy of the UserInviteRecord +func (i *UserInviteRecord) Copy() *UserInviteRecord { + autoGroups := make([]string, len(i.AutoGroups)) + copy(autoGroups, i.AutoGroups) + + return &UserInviteRecord{ + ID: i.ID, + AccountID: i.AccountID, + Email: i.Email, + Name: i.Name, + Role: i.Role, + AutoGroups: autoGroups, + HashedToken: i.HashedToken, + ExpiresAt: i.ExpiresAt, + CreatedAt: i.CreatedAt, + CreatedBy: i.CreatedBy, + } +} diff --git a/management/server/types/user_invite_test.go b/management/server/types/user_invite_test.go new file mode 100644 index 000000000..09dae3800 --- /dev/null +++ b/management/server/types/user_invite_test.go @@ -0,0 +1,355 @@ +package types + +import ( + "crypto/sha256" + b64 "encoding/base64" + "hash/crc32" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/base62" + "github.com/netbirdio/netbird/util/crypt" +) + +func TestUserInviteRecord_TableName(t *testing.T) { + invite := UserInviteRecord{} + assert.Equal(t, "user_invites", invite.TableName()) +} + +func TestGenerateInviteToken_Success(t *testing.T) { + hashedToken, plainToken, err := GenerateInviteToken() + require.NoError(t, err) + assert.NotEmpty(t, hashedToken) + assert.NotEmpty(t, plainToken) +} + +func TestGenerateInviteToken_Length(t *testing.T) { + _, plainToken, err := GenerateInviteToken() + require.NoError(t, err) + assert.Len(t, plainToken, InviteTokenLength) +} + +func TestGenerateInviteToken_Prefix(t *testing.T) { + _, plainToken, err := GenerateInviteToken() + require.NoError(t, err) + assert.True(t, strings.HasPrefix(plainToken, InviteTokenPrefix)) +} + +func TestGenerateInviteToken_Hashing(t *testing.T) { + hashedToken, plainToken, err := GenerateInviteToken() + require.NoError(t, err) + + expectedHash := sha256.Sum256([]byte(plainToken)) + expectedHashedToken := b64.StdEncoding.EncodeToString(expectedHash[:]) + assert.Equal(t, expectedHashedToken, hashedToken) +} + +func TestGenerateInviteToken_Checksum(t *testing.T) { + _, plainToken, err := GenerateInviteToken() + require.NoError(t, err) + + // Extract parts + secret := plainToken[len(InviteTokenPrefix) : len(InviteTokenPrefix)+InviteTokenSecretLength] + checksumStr := plainToken[len(InviteTokenPrefix)+InviteTokenSecretLength:] + + // Verify checksum + expectedChecksum := crc32.ChecksumIEEE([]byte(secret)) + actualChecksum, err := base62.Decode(checksumStr) + require.NoError(t, err) + assert.Equal(t, expectedChecksum, actualChecksum) +} + +func TestGenerateInviteToken_Uniqueness(t *testing.T) { + tokens := make(map[string]bool) + for i := 0; i < 100; i++ { + _, plainToken, err := GenerateInviteToken() + require.NoError(t, err) + assert.False(t, tokens[plainToken], "Token should be unique") + tokens[plainToken] = true + } +} + +func TestHashInviteToken(t *testing.T) { + token := "nbi_testtoken123456789012345678901234" + hashedToken := HashInviteToken(token) + + expectedHash := sha256.Sum256([]byte(token)) + expectedHashedToken := b64.StdEncoding.EncodeToString(expectedHash[:]) + assert.Equal(t, expectedHashedToken, hashedToken) +} + +func TestHashInviteToken_Consistency(t *testing.T) { + token := "nbi_testtoken123456789012345678901234" + hash1 := HashInviteToken(token) + hash2 := HashInviteToken(token) + assert.Equal(t, hash1, hash2) +} + +func TestHashInviteToken_DifferentTokens(t *testing.T) { + token1 := "nbi_testtoken123456789012345678901234" + token2 := "nbi_testtoken123456789012345678901235" + hash1 := HashInviteToken(token1) + hash2 := HashInviteToken(token2) + assert.NotEqual(t, hash1, hash2) +} + +func TestValidateInviteToken_Success(t *testing.T) { + _, plainToken, err := GenerateInviteToken() + require.NoError(t, err) + + err = ValidateInviteToken(plainToken) + assert.NoError(t, err) +} + +func TestValidateInviteToken_InvalidLength(t *testing.T) { + testCases := []struct { + name string + token string + }{ + {"empty", ""}, + {"too short", "nbi_abc"}, + {"too long", "nbi_" + strings.Repeat("a", 50)}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := ValidateInviteToken(tc.token) + require.Error(t, err) + assert.Contains(t, err.Error(), "invalid token length") + }) + } +} + +func TestValidateInviteToken_InvalidPrefix(t *testing.T) { + // Create a token with wrong prefix but correct length + token := "xyz_" + strings.Repeat("a", 30) + "000000" + err := ValidateInviteToken(token) + require.Error(t, err) + assert.Contains(t, err.Error(), "invalid token prefix") +} + +func TestValidateInviteToken_InvalidChecksum(t *testing.T) { + // Create a token with correct format but invalid checksum + token := InviteTokenPrefix + strings.Repeat("a", InviteTokenSecretLength) + "ZZZZZZ" + err := ValidateInviteToken(token) + require.Error(t, err) + assert.Contains(t, err.Error(), "checksum") +} + +func TestValidateInviteToken_ModifiedToken(t *testing.T) { + _, plainToken, err := GenerateInviteToken() + require.NoError(t, err) + + // Modify one character in the secret part + modifiedToken := plainToken[:5] + "X" + plainToken[6:] + err = ValidateInviteToken(modifiedToken) + require.Error(t, err) +} + +func TestUserInviteRecord_IsExpired(t *testing.T) { + t.Run("not expired", func(t *testing.T) { + invite := &UserInviteRecord{ + ExpiresAt: time.Now().Add(time.Hour), + } + assert.False(t, invite.IsExpired()) + }) + + t.Run("expired", func(t *testing.T) { + invite := &UserInviteRecord{ + ExpiresAt: time.Now().Add(-time.Hour), + } + assert.True(t, invite.IsExpired()) + }) + + t.Run("just expired", func(t *testing.T) { + invite := &UserInviteRecord{ + ExpiresAt: time.Now().Add(-time.Second), + } + assert.True(t, invite.IsExpired()) + }) +} + +func TestNewInviteID(t *testing.T) { + id := NewInviteID() + assert.NotEmpty(t, id) + assert.Len(t, id, 20) // xid generates 20 character IDs +} + +func TestNewInviteID_Uniqueness(t *testing.T) { + ids := make(map[string]bool) + for i := 0; i < 100; i++ { + id := NewInviteID() + assert.False(t, ids[id], "ID should be unique") + ids[id] = true + } +} + +func TestUserInviteRecord_EncryptDecryptSensitiveData(t *testing.T) { + key, err := crypt.GenerateKey() + require.NoError(t, err) + fieldEncrypt, err := crypt.NewFieldEncrypt(key) + require.NoError(t, err) + + t.Run("encrypt and decrypt", func(t *testing.T) { + invite := &UserInviteRecord{ + ID: "test-invite", + AccountID: "test-account", + Email: "test@example.com", + Name: "Test User", + Role: "user", + } + + // Encrypt + err := invite.EncryptSensitiveData(fieldEncrypt) + require.NoError(t, err) + + // Verify encrypted values are different from original + assert.NotEqual(t, "test@example.com", invite.Email) + assert.NotEqual(t, "Test User", invite.Name) + + // Decrypt + err = invite.DecryptSensitiveData(fieldEncrypt) + require.NoError(t, err) + + // Verify decrypted values match original + assert.Equal(t, "test@example.com", invite.Email) + assert.Equal(t, "Test User", invite.Name) + }) + + t.Run("encrypt empty fields", func(t *testing.T) { + invite := &UserInviteRecord{ + ID: "test-invite", + AccountID: "test-account", + Email: "", + Name: "", + Role: "user", + } + + err := invite.EncryptSensitiveData(fieldEncrypt) + require.NoError(t, err) + assert.Equal(t, "", invite.Email) + assert.Equal(t, "", invite.Name) + + err = invite.DecryptSensitiveData(fieldEncrypt) + require.NoError(t, err) + assert.Equal(t, "", invite.Email) + assert.Equal(t, "", invite.Name) + }) + + t.Run("nil encryptor", func(t *testing.T) { + invite := &UserInviteRecord{ + ID: "test-invite", + AccountID: "test-account", + Email: "test@example.com", + Name: "Test User", + Role: "user", + } + + err := invite.EncryptSensitiveData(nil) + require.NoError(t, err) + assert.Equal(t, "test@example.com", invite.Email) + assert.Equal(t, "Test User", invite.Name) + + err = invite.DecryptSensitiveData(nil) + require.NoError(t, err) + assert.Equal(t, "test@example.com", invite.Email) + assert.Equal(t, "Test User", invite.Name) + }) +} + +func TestUserInviteRecord_Copy(t *testing.T) { + now := time.Now() + expiresAt := now.Add(72 * time.Hour) + + original := &UserInviteRecord{ + ID: "invite-id", + AccountID: "account-id", + Email: "test@example.com", + Name: "Test User", + Role: "user", + AutoGroups: []string{"group1", "group2"}, + HashedToken: "hashed-token", + ExpiresAt: expiresAt, + CreatedAt: now, + CreatedBy: "creator-id", + } + + copied := original.Copy() + + // Verify all fields are copied + assert.Equal(t, original.ID, copied.ID) + assert.Equal(t, original.AccountID, copied.AccountID) + assert.Equal(t, original.Email, copied.Email) + assert.Equal(t, original.Name, copied.Name) + assert.Equal(t, original.Role, copied.Role) + assert.Equal(t, original.AutoGroups, copied.AutoGroups) + assert.Equal(t, original.HashedToken, copied.HashedToken) + assert.Equal(t, original.ExpiresAt, copied.ExpiresAt) + assert.Equal(t, original.CreatedAt, copied.CreatedAt) + assert.Equal(t, original.CreatedBy, copied.CreatedBy) + + // Verify deep copy of AutoGroups (modifying copy doesn't affect original) + copied.AutoGroups[0] = "modified" + assert.NotEqual(t, original.AutoGroups[0], copied.AutoGroups[0]) + assert.Equal(t, "group1", original.AutoGroups[0]) +} + +func TestUserInviteRecord_Copy_EmptyAutoGroups(t *testing.T) { + original := &UserInviteRecord{ + ID: "invite-id", + AccountID: "account-id", + AutoGroups: []string{}, + } + + copied := original.Copy() + assert.NotNil(t, copied.AutoGroups) + assert.Len(t, copied.AutoGroups, 0) +} + +func TestUserInviteRecord_Copy_NilAutoGroups(t *testing.T) { + original := &UserInviteRecord{ + ID: "invite-id", + AccountID: "account-id", + AutoGroups: nil, + } + + copied := original.Copy() + assert.NotNil(t, copied.AutoGroups) + assert.Len(t, copied.AutoGroups, 0) +} + +func TestInviteTokenConstants(t *testing.T) { + // Verify constants are consistent + expectedLength := len(InviteTokenPrefix) + InviteTokenSecretLength + InviteTokenChecksumLength + assert.Equal(t, InviteTokenLength, expectedLength) + assert.Equal(t, 4, len(InviteTokenPrefix)) + assert.Equal(t, 30, InviteTokenSecretLength) + assert.Equal(t, 6, InviteTokenChecksumLength) + assert.Equal(t, 40, InviteTokenLength) + assert.Equal(t, 259200, DefaultInviteExpirationSeconds) // 72 hours + assert.Equal(t, 3600, MinInviteExpirationSeconds) // 1 hour +} + +func TestGenerateInviteToken_ValidatesOwnOutput(t *testing.T) { + // Generate multiple tokens and ensure they all validate + for i := 0; i < 50; i++ { + _, plainToken, err := GenerateInviteToken() + require.NoError(t, err) + + err = ValidateInviteToken(plainToken) + assert.NoError(t, err, "Generated token should always be valid") + } +} + +func TestHashInviteToken_MatchesGeneratedHash(t *testing.T) { + hashedToken, plainToken, err := GenerateInviteToken() + require.NoError(t, err) + + // HashInviteToken should produce the same hash as GenerateInviteToken + rehashedToken := HashInviteToken(plainToken) + assert.Equal(t, hashedToken, rehashedToken) +} diff --git a/management/server/user.go b/management/server/user.go index d12dd4f11..51da7a633 100644 --- a/management/server/user.go +++ b/management/server/user.go @@ -6,6 +6,7 @@ import ( "fmt" "strings" "time" + "unicode" nbcontext "github.com/netbirdio/netbird/management/server/context" "github.com/netbirdio/netbird/shared/auth" @@ -249,6 +250,37 @@ func (am *DefaultAccountManager) ListUsers(ctx context.Context, accountID string return am.Store.GetAccountUsers(ctx, store.LockingStrengthNone, accountID) } +// UpdateUserPassword updates the password for a user in the embedded IdP. +// This is only available when the embedded IdP is enabled. +// Users can only change their own password. +func (am *DefaultAccountManager) UpdateUserPassword(ctx context.Context, accountID, currentUserID, targetUserID string, oldPassword, newPassword string) error { + if !IsEmbeddedIdp(am.idpManager) { + return status.Errorf(status.PreconditionFailed, "password change is only available with embedded identity provider") + } + + if oldPassword == "" { + return status.Errorf(status.InvalidArgument, "old password is required") + } + + if newPassword == "" { + return status.Errorf(status.InvalidArgument, "new password is required") + } + + embeddedIdp, ok := am.idpManager.(*idp.EmbeddedIdPManager) + if !ok { + return status.Errorf(status.Internal, "failed to get embedded IdP manager") + } + + err := embeddedIdp.UpdateUserPassword(ctx, currentUserID, targetUserID, oldPassword, newPassword) + if err != nil { + return status.Errorf(status.InvalidArgument, "failed to update password: %v", err) + } + + am.StoreEvent(ctx, currentUserID, targetUserID, accountID, activity.UserPasswordChanged, nil) + + return nil +} + func (am *DefaultAccountManager) deleteServiceUser(ctx context.Context, accountID string, initiatorUserID string, targetUser *types.User) error { if err := am.Store.DeleteUser(ctx, accountID, targetUser.Id); err != nil { return err @@ -673,7 +705,7 @@ func (am *DefaultAccountManager) prepareUserUpdateEvents(ctx context.Context, ac "is_service_user": oldUser.IsServiceUser, "user_name": oldUser.ServiceUserName, } eventsToStore = append(eventsToStore, func() { - am.StoreEvent(ctx, oldUser.Id, oldUser.Id, accountID, activity.GroupAddedToUser, meta) + am.StoreEvent(ctx, initiatorUserID, oldUser.Id, accountID, activity.GroupAddedToUser, meta) }) } @@ -687,7 +719,7 @@ func (am *DefaultAccountManager) prepareUserUpdateEvents(ctx context.Context, ac "is_service_user": oldUser.IsServiceUser, "user_name": oldUser.ServiceUserName, } eventsToStore = append(eventsToStore, func() { - am.StoreEvent(ctx, oldUser.Id, oldUser.Id, accountID, activity.GroupRemovedFromUser, meta) + am.StoreEvent(ctx, initiatorUserID, oldUser.Id, accountID, activity.GroupRemovedFromUser, meta) }) } @@ -806,7 +838,20 @@ func (am *DefaultAccountManager) getUserInfo(ctx context.Context, user *types.Us } return user.ToUserInfo(userData) } - return user.ToUserInfo(nil) + + userInfo, err := user.ToUserInfo(nil) + if err != nil { + return nil, err + } + + // For embedded IDP users, extract the IdPID (connector ID) from the encoded user ID + if IsEmbeddedIdp(am.idpManager) && !user.IsServiceUser { + if _, connectorID, decodeErr := dex.DecodeDexUserID(user.Id); decodeErr == nil && connectorID != "" { + userInfo.IdPID = connectorID + } + } + + return userInfo, nil } // validateUserUpdate validates the update operation for a user. @@ -1238,7 +1283,7 @@ func (am *DefaultAccountManager) deleteRegularUser(ctx context.Context, accountI addPeerRemovedEvent() } - meta := map[string]any{"name": targetUserInfo.Name, "email": targetUserInfo.Email, "created_at": targetUser.CreatedAt} + meta := map[string]any{"name": targetUserInfo.Name, "email": targetUserInfo.Email, "created_at": targetUser.CreatedAt, "issued": targetUser.Issued} am.StoreEvent(ctx, initiatorUserID, targetUser.Id, accountID, activity.UserDeleted, meta) return updateAccountPeers, nil @@ -1409,3 +1454,368 @@ func (am *DefaultAccountManager) RejectUser(ctx context.Context, accountID, init return nil } + +// CreateUserInvite creates an invite link for a new user in the embedded IdP. +// The user is NOT created until the invite is accepted. +func (am *DefaultAccountManager) CreateUserInvite(ctx context.Context, accountID, initiatorUserID string, invite *types.UserInfo, expiresIn int) (*types.UserInvite, error) { + if !IsEmbeddedIdp(am.idpManager) { + return nil, status.Errorf(status.PreconditionFailed, "invite links are only available with embedded identity provider") + } + + if err := validateUserInvite(invite); err != nil { + return nil, err + } + + allowed, err := am.permissionsManager.ValidateUserPermissions(ctx, accountID, initiatorUserID, modules.Users, operations.Create) + if err != nil { + return nil, status.NewPermissionValidationError(err) + } + if !allowed { + return nil, status.NewPermissionDeniedError() + } + + // Check if user already exists in NetBird DB + existingUsers, err := am.Store.GetAccountUsers(ctx, store.LockingStrengthNone, accountID) + if err != nil { + return nil, err + } + for _, user := range existingUsers { + if strings.EqualFold(user.Email, invite.Email) { + return nil, status.Errorf(status.UserAlreadyExists, "user with this email already exists") + } + } + + // Check if invite already exists for this email + existingInvite, err := am.Store.GetUserInviteByEmail(ctx, store.LockingStrengthNone, accountID, invite.Email) + if err != nil { + if sErr, ok := status.FromError(err); !ok || sErr.Type() != status.NotFound { + return nil, fmt.Errorf("failed to check existing invites: %w", err) + } + } + if existingInvite != nil { + return nil, status.Errorf(status.AlreadyExists, "invite already exists for this email") + } + + // Calculate expiration time + if expiresIn <= 0 { + expiresIn = types.DefaultInviteExpirationSeconds + } + + if expiresIn < types.MinInviteExpirationSeconds { + return nil, status.Errorf(status.InvalidArgument, "invite expiration must be at least 1 hour") + } + expiresAt := time.Now().UTC().Add(time.Duration(expiresIn) * time.Second) + + // Generate invite token + inviteID := types.NewInviteID() + hashedToken, plainToken, err := types.GenerateInviteToken() + if err != nil { + return nil, fmt.Errorf("failed to generate invite token: %w", err) + } + + // Create the invite record (no user created yet) + userInvite := &types.UserInviteRecord{ + ID: inviteID, + AccountID: accountID, + Email: invite.Email, + Name: invite.Name, + Role: invite.Role, + AutoGroups: invite.AutoGroups, + HashedToken: hashedToken, + ExpiresAt: expiresAt, + CreatedAt: time.Now().UTC(), + CreatedBy: initiatorUserID, + } + + if err := am.Store.SaveUserInvite(ctx, userInvite); err != nil { + return nil, err + } + + am.StoreEvent(ctx, initiatorUserID, inviteID, accountID, activity.UserInviteLinkCreated, map[string]any{"email": invite.Email}) + + return &types.UserInvite{ + UserInfo: &types.UserInfo{ + ID: inviteID, + Email: invite.Email, + Name: invite.Name, + Role: invite.Role, + AutoGroups: invite.AutoGroups, + Status: string(types.UserStatusInvited), + Issued: types.UserIssuedAPI, + }, + InviteToken: plainToken, + InviteExpiresAt: expiresAt, + }, nil +} + +// GetUserInviteInfo retrieves invite information from a token (public endpoint). +func (am *DefaultAccountManager) GetUserInviteInfo(ctx context.Context, token string) (*types.UserInviteInfo, error) { + if err := types.ValidateInviteToken(token); err != nil { + return nil, status.Errorf(status.InvalidArgument, "invalid invite token: %v", err) + } + + hashedToken := types.HashInviteToken(token) + invite, err := am.Store.GetUserInviteByHashedToken(ctx, store.LockingStrengthNone, hashedToken) + if err != nil { + return nil, err + } + + // Get the inviter's name + invitedBy := "" + if invite.CreatedBy != "" { + inviter, err := am.Store.GetUserByUserID(ctx, store.LockingStrengthNone, invite.CreatedBy) + if err == nil && inviter != nil { + invitedBy = inviter.Name + } + } + + return &types.UserInviteInfo{ + Email: invite.Email, + Name: invite.Name, + ExpiresAt: invite.ExpiresAt, + Valid: !invite.IsExpired(), + InvitedBy: invitedBy, + }, nil +} + +// ListUserInvites returns all invites for an account. +func (am *DefaultAccountManager) ListUserInvites(ctx context.Context, accountID, initiatorUserID string) ([]*types.UserInvite, error) { + if !IsEmbeddedIdp(am.idpManager) { + return nil, status.Errorf(status.PreconditionFailed, "invite links are only available with embedded identity provider") + } + + allowed, err := am.permissionsManager.ValidateUserPermissions(ctx, accountID, initiatorUserID, modules.Users, operations.Read) + if err != nil { + return nil, status.NewPermissionValidationError(err) + } + if !allowed { + return nil, status.NewPermissionDeniedError() + } + + records, err := am.Store.GetAccountUserInvites(ctx, store.LockingStrengthNone, accountID) + if err != nil { + return nil, err + } + + invites := make([]*types.UserInvite, 0, len(records)) + for _, record := range records { + invites = append(invites, &types.UserInvite{ + UserInfo: &types.UserInfo{ + ID: record.ID, + Email: record.Email, + Name: record.Name, + Role: record.Role, + AutoGroups: record.AutoGroups, + }, + InviteExpiresAt: record.ExpiresAt, + InviteCreatedAt: record.CreatedAt, + }) + } + + return invites, nil +} + +// AcceptUserInvite accepts an invite and creates the user in both IdP and NetBird DB. +func (am *DefaultAccountManager) AcceptUserInvite(ctx context.Context, token, password string) error { + if !IsEmbeddedIdp(am.idpManager) { + return status.Errorf(status.PreconditionFailed, "invite links are only available with embedded identity provider") + } + + if password == "" { + return status.Errorf(status.InvalidArgument, "password is required") + } + + if err := validatePassword(password); err != nil { + return status.Errorf(status.InvalidArgument, "invalid password: %v", err) + } + + if err := types.ValidateInviteToken(token); err != nil { + return status.Errorf(status.InvalidArgument, "invalid invite token: %v", err) + } + + hashedToken := types.HashInviteToken(token) + invite, err := am.Store.GetUserInviteByHashedToken(ctx, store.LockingStrengthUpdate, hashedToken) + if err != nil { + return err + } + + if invite.IsExpired() { + return status.Errorf(status.InvalidArgument, "invite has expired") + } + + // Create user in Dex with the provided password + embeddedIdp, ok := am.idpManager.(*idp.EmbeddedIdPManager) + if !ok { + return status.Errorf(status.Internal, "failed to get embedded IdP manager") + } + + idpUser, err := embeddedIdp.CreateUserWithPassword(ctx, invite.Email, password, invite.Name) + if err != nil { + return fmt.Errorf("failed to create user in IdP: %w", err) + } + + // Create user in NetBird DB + newUser := &types.User{ + Id: idpUser.ID, + AccountID: invite.AccountID, + Role: types.StrRoleToUserRole(invite.Role), + AutoGroups: invite.AutoGroups, + Issued: types.UserIssuedAPI, + CreatedAt: time.Now().UTC(), + Email: invite.Email, + Name: invite.Name, + } + + err = am.Store.ExecuteInTransaction(ctx, func(transaction store.Store) error { + if err := transaction.SaveUser(ctx, newUser); err != nil { + return fmt.Errorf("failed to save user: %w", err) + } + if err := transaction.DeleteUserInvite(ctx, invite.ID); err != nil { + return fmt.Errorf("failed to delete invite: %w", err) + } + return nil + }) + if err != nil { + // Best-effort rollback: delete the IdP user to avoid orphaned records + if deleteErr := embeddedIdp.DeleteUser(ctx, idpUser.ID); deleteErr != nil { + log.WithContext(ctx).WithError(deleteErr).Errorf("failed to rollback IdP user %s after transaction failure", idpUser.ID) + } + return err + } + + am.StoreEvent(ctx, newUser.Id, newUser.Id, invite.AccountID, activity.UserInviteLinkAccepted, map[string]any{"email": invite.Email}) + + return nil +} + +// RegenerateUserInvite creates a new invite token for an existing invite, invalidating the previous one. +func (am *DefaultAccountManager) RegenerateUserInvite(ctx context.Context, accountID, initiatorUserID, inviteID string, expiresIn int) (*types.UserInvite, error) { + if !IsEmbeddedIdp(am.idpManager) { + return nil, status.Errorf(status.PreconditionFailed, "invite links are only available with embedded identity provider") + } + + allowed, err := am.permissionsManager.ValidateUserPermissions(ctx, accountID, initiatorUserID, modules.Users, operations.Update) + if err != nil { + return nil, status.NewPermissionValidationError(err) + } + if !allowed { + return nil, status.NewPermissionDeniedError() + } + + // Get existing invite + existingInvite, err := am.Store.GetUserInviteByID(ctx, store.LockingStrengthUpdate, accountID, inviteID) + if err != nil { + return nil, err + } + + // Calculate expiration time + if expiresIn <= 0 { + expiresIn = types.DefaultInviteExpirationSeconds + } + if expiresIn < types.MinInviteExpirationSeconds { + return nil, status.Errorf(status.InvalidArgument, "invite expiration must be at least 1 hour") + } + expiresAt := time.Now().UTC().Add(time.Duration(expiresIn) * time.Second) + + // Generate new invite token + hashedToken, plainToken, err := types.GenerateInviteToken() + if err != nil { + return nil, fmt.Errorf("failed to generate invite token: %w", err) + } + + // Update existing invite with new token and expiration + existingInvite.HashedToken = hashedToken + existingInvite.ExpiresAt = expiresAt + existingInvite.CreatedBy = initiatorUserID + + err = am.Store.SaveUserInvite(ctx, existingInvite) + if err != nil { + return nil, err + } + + am.StoreEvent(ctx, initiatorUserID, existingInvite.ID, accountID, activity.UserInviteLinkRegenerated, map[string]any{"email": existingInvite.Email}) + + return &types.UserInvite{ + UserInfo: &types.UserInfo{ + ID: existingInvite.ID, + Email: existingInvite.Email, + Name: existingInvite.Name, + Role: existingInvite.Role, + AutoGroups: existingInvite.AutoGroups, + Status: string(types.UserStatusInvited), + Issued: types.UserIssuedAPI, + }, + InviteToken: plainToken, + InviteExpiresAt: expiresAt, + }, nil +} + +// DeleteUserInvite deletes an existing invite by ID. +func (am *DefaultAccountManager) DeleteUserInvite(ctx context.Context, accountID, initiatorUserID, inviteID string) error { + if !IsEmbeddedIdp(am.idpManager) { + return status.Errorf(status.PreconditionFailed, "invite links are only available with embedded identity provider") + } + + allowed, err := am.permissionsManager.ValidateUserPermissions(ctx, accountID, initiatorUserID, modules.Users, operations.Delete) + if err != nil { + return status.NewPermissionValidationError(err) + } + if !allowed { + return status.NewPermissionDeniedError() + } + + invite, err := am.Store.GetUserInviteByID(ctx, store.LockingStrengthUpdate, accountID, inviteID) + if err != nil { + return err + } + + if err := am.Store.DeleteUserInvite(ctx, inviteID); err != nil { + return err + } + + am.StoreEvent(ctx, initiatorUserID, inviteID, accountID, activity.UserInviteLinkDeleted, map[string]any{"email": invite.Email}) + + return nil +} + +const minPasswordLength = 8 + +// validatePassword checks password strength requirements: +// - Minimum 8 characters +// - At least 1 digit +// - At least 1 uppercase letter +// - At least 1 special character +func validatePassword(password string) error { + if len(password) < minPasswordLength { + return errors.New("password must be at least 8 characters long") + } + + var hasDigit, hasUpper, hasSpecial bool + for _, c := range password { + switch { + case unicode.IsDigit(c): + hasDigit = true + case unicode.IsUpper(c): + hasUpper = true + case !unicode.IsLetter(c) && !unicode.IsDigit(c): + hasSpecial = true + } + } + + var missing []string + if !hasDigit { + missing = append(missing, "one digit") + } + if !hasUpper { + missing = append(missing, "one uppercase letter") + } + if !hasSpecial { + missing = append(missing, "one special character") + } + + if len(missing) > 0 { + return errors.New("password must contain at least " + strings.Join(missing, ", ")) + } + + return nil +} diff --git a/management/server/user_invite_test.go b/management/server/user_invite_test.go new file mode 100644 index 000000000..6256ed44a --- /dev/null +++ b/management/server/user_invite_test.go @@ -0,0 +1,1010 @@ +package server + +import ( + "context" + "os" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/management/server/activity" + "github.com/netbirdio/netbird/management/server/idp" + "github.com/netbirdio/netbird/management/server/permissions" + "github.com/netbirdio/netbird/management/server/store" + "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/shared/management/status" + "github.com/netbirdio/netbird/util/crypt" +) + +const ( + testAccountID = "testAccountID" + testAdminUserID = "testAdminUserID" + testRegularUserID = "testRegularUserID" +) + +// setupInviteTestManagerWithEmbeddedIdP creates a test manager with a real embedded IdP +// and store encryption enabled. This is required for tests that need to pass the IsEmbeddedIdp check. +func setupInviteTestManagerWithEmbeddedIdP(t *testing.T) (*DefaultAccountManager, func()) { + t.Helper() + ctx := context.Background() + + tmpDir := t.TempDir() + dexDataDir := tmpDir + "/dex" + require.NoError(t, os.MkdirAll(dexDataDir, 0700)) + + // Create test store + s, cleanup, err := store.NewTestStoreFromSQL(ctx, "", tmpDir) + require.NoError(t, err, "Error when creating store") + + // Enable encryption + key, err := crypt.GenerateKey() + require.NoError(t, err) + fieldEncrypt, err := crypt.NewFieldEncrypt(key) + require.NoError(t, err) + s.SetFieldEncrypt(fieldEncrypt) + + // Create embedded IDP config + embeddedIdPConfig := &idp.EmbeddedIdPConfig{ + Enabled: true, + Issuer: "http://localhost:5556/dex", + Storage: idp.EmbeddedStorageConfig{ + Type: "sqlite3", + Config: idp.EmbeddedStorageTypeConfig{ + File: dexDataDir + "/dex.db", + }, + }, + } + + // Create embedded IDP manager + embeddedIdp, err := idp.NewEmbeddedIdPManager(ctx, embeddedIdPConfig, nil) + require.NoError(t, err) + + account := newAccountWithId(ctx, testAccountID, testAdminUserID, "", "admin@test.com", "Admin User", false) + account.Users[testRegularUserID] = &types.User{ + Id: testRegularUserID, + AccountID: testAccountID, + Role: types.UserRoleUser, + Email: "regular@test.com", + Name: "Regular User", + } + + err = s.SaveAccount(ctx, account) + require.NoError(t, err, "Error when saving account") + + permissionsManager := permissions.NewManager(s) + + am := DefaultAccountManager{ + Store: s, + eventStore: &activity.InMemoryEventStore{}, + permissionsManager: permissionsManager, + idpManager: embeddedIdp, + } + + cleanupFunc := func() { + _ = embeddedIdp.Stop(ctx) + cleanup() + } + + return &am, cleanupFunc +} + +func TestCreateUserInvite_Success(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "user", + AutoGroups: []string{}, + } + + result, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.NoError(t, err) + require.NotNil(t, result) + + assert.Equal(t, "newuser@test.com", result.UserInfo.Email) + assert.Equal(t, "New User", result.UserInfo.Name) + assert.Equal(t, "user", result.UserInfo.Role) + assert.Equal(t, string(types.UserStatusInvited), result.UserInfo.Status) + assert.NotEmpty(t, result.InviteToken) + assert.True(t, result.InviteExpiresAt.After(time.Now())) + + // Verify invite is stored in DB + invites, err := am.Store.GetAccountUserInvites(context.Background(), store.LockingStrengthNone, testAccountID) + require.NoError(t, err) + assert.Len(t, invites, 1) + assert.Equal(t, "newuser@test.com", invites[0].Email) +} + +func TestCreateUserInvite_DuplicateEmail(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "user", + AutoGroups: []string{}, + } + + // Create first invite + _, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.NoError(t, err) + + // Try to create duplicate invite + _, err = am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.Error(t, err) + + sErr, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, status.AlreadyExists, sErr.Type()) +} + +func TestCreateUserInvite_ExistingUserEmail(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + // Try to invite with an email that already exists as a user + invite := &types.UserInfo{ + Email: "regular@test.com", // Already exists as a user + Name: "Duplicate User", + Role: "user", + AutoGroups: []string{}, + } + + _, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.Error(t, err) + + sErr, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, status.UserAlreadyExists, sErr.Type()) +} + +func TestCreateUserInvite_PermissionDenied(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "user", + AutoGroups: []string{}, + } + + // Regular user should not be able to create invites + _, err := am.CreateUserInvite(context.Background(), testAccountID, testRegularUserID, invite, 0) + require.Error(t, err) +} + +func TestCreateUserInvite_InvalidEmail(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + invite := &types.UserInfo{ + Email: "", + Name: "New User", + Role: "user", + AutoGroups: []string{}, + } + + _, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.Error(t, err) + + sErr, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, status.InvalidArgument, sErr.Type()) +} + +func TestCreateUserInvite_InvalidName(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "", + Role: "user", + AutoGroups: []string{}, + } + + _, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.Error(t, err) + + sErr, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, status.InvalidArgument, sErr.Type()) +} + +func TestCreateUserInvite_OwnerRole(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + invite := &types.UserInfo{ + Email: "newowner@test.com", + Name: "New Owner", + Role: "owner", + AutoGroups: []string{}, + } + + _, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.Error(t, err) + + sErr, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, status.InvalidArgument, sErr.Type()) +} + +func TestCreateUserInvite_ExpirationTooShort(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "user", + AutoGroups: []string{}, + } + + // Try to create with expiration less than 1 hour + _, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 1800) // 30 minutes + require.Error(t, err) + + sErr, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, status.InvalidArgument, sErr.Type()) + assert.Contains(t, err.Error(), "at least 1 hour") +} + +func TestCreateUserInvite_CustomExpiration(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "user", + AutoGroups: []string{}, + } + + expiresIn := 7200 // 2 hours + result, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, expiresIn) + require.NoError(t, err) + + // Verify expiration is approximately 2 hours from now + expectedExpiration := time.Now().Add(time.Duration(expiresIn) * time.Second) + assert.WithinDuration(t, expectedExpiration, result.InviteExpiresAt, time.Minute) +} + +func TestCreateUserInvite_WithAutoGroups(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "user", + AutoGroups: []string{"group1", "group2"}, + } + + result, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.NoError(t, err) + assert.Equal(t, []string{"group1", "group2"}, result.UserInfo.AutoGroups) + + // Verify invite in DB has auto groups + invites, err := am.Store.GetAccountUserInvites(context.Background(), store.LockingStrengthNone, testAccountID) + require.NoError(t, err) + require.Len(t, invites, 1) + assert.Equal(t, []string{"group1", "group2"}, invites[0].AutoGroups) +} + +func TestGetUserInviteInfo_Success(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + // Create an invite first + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "user", + AutoGroups: []string{}, + } + + result, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.NoError(t, err) + + // Get the invite info using the token + info, err := am.GetUserInviteInfo(context.Background(), result.InviteToken) + require.NoError(t, err) + require.NotNil(t, info) + + assert.Equal(t, "newuser@test.com", info.Email) + assert.Equal(t, "New User", info.Name) + assert.True(t, info.Valid) + assert.Equal(t, "Admin User", info.InvitedBy) +} + +func TestGetUserInviteInfo_InvalidToken(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + _, err := am.GetUserInviteInfo(context.Background(), "invalid_token") + require.Error(t, err) + + sErr, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, status.InvalidArgument, sErr.Type()) +} + +func TestGetUserInviteInfo_TokenNotFound(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + // Generate a valid token format that doesn't exist in DB + _, validToken, err := types.GenerateInviteToken() + require.NoError(t, err) + + _, err = am.GetUserInviteInfo(context.Background(), validToken) + require.Error(t, err) + + sErr, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, status.NotFound, sErr.Type()) +} + +func TestGetUserInviteInfo_ExpiredInvite(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + // Create an invite with valid expiration + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "user", + AutoGroups: []string{}, + } + + result, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.NoError(t, err) + + // Manually set the invite to expired by updating the store directly + inviteRecord, err := am.Store.GetUserInviteByID(context.Background(), store.LockingStrengthUpdate, testAccountID, result.UserInfo.ID) + require.NoError(t, err) + inviteRecord.ExpiresAt = time.Now().Add(-time.Hour) // Set to 1 hour ago + err = am.Store.SaveUserInvite(context.Background(), inviteRecord) + require.NoError(t, err) + + // Get the invite info - should still return info but Valid should be false + info, err := am.GetUserInviteInfo(context.Background(), result.InviteToken) + require.NoError(t, err) + assert.False(t, info.Valid) +} + +func TestListUserInvites_Success(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + // Create multiple invites + for i, email := range []string{"user1@test.com", "user2@test.com", "user3@test.com"} { + invite := &types.UserInfo{ + Email: email, + Name: "User " + string(rune('1'+i)), + Role: "user", + AutoGroups: []string{}, + } + _, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.NoError(t, err) + } + + // List invites + invites, err := am.ListUserInvites(context.Background(), testAccountID, testAdminUserID) + require.NoError(t, err) + assert.Len(t, invites, 3) +} + +func TestListUserInvites_Empty(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + invites, err := am.ListUserInvites(context.Background(), testAccountID, testAdminUserID) + require.NoError(t, err) + assert.Len(t, invites, 0) +} + +func TestListUserInvites_PermissionDenied(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + _, err := am.ListUserInvites(context.Background(), testAccountID, testRegularUserID) + require.Error(t, err) +} + +func TestRegenerateUserInvite_Success(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + // Create an invite first + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "user", + AutoGroups: []string{}, + } + + originalResult, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.NoError(t, err) + + // Regenerate the invite + newResult, err := am.RegenerateUserInvite(context.Background(), testAccountID, testAdminUserID, originalResult.UserInfo.ID, 0) + require.NoError(t, err) + require.NotNil(t, newResult) + + // Verify invite ID remains the same (stable ID for clients) + assert.Equal(t, originalResult.UserInfo.ID, newResult.UserInfo.ID) + + // Verify new token is different + assert.NotEqual(t, originalResult.InviteToken, newResult.InviteToken) + assert.Equal(t, "newuser@test.com", newResult.UserInfo.Email) + assert.Equal(t, "New User", newResult.UserInfo.Name) + + // Verify old token no longer works + _, err = am.GetUserInviteInfo(context.Background(), originalResult.InviteToken) + require.Error(t, err) + + // Verify new token works + info, err := am.GetUserInviteInfo(context.Background(), newResult.InviteToken) + require.NoError(t, err) + assert.Equal(t, "newuser@test.com", info.Email) +} + +func TestRegenerateUserInvite_NotFound(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + _, err := am.RegenerateUserInvite(context.Background(), testAccountID, testAdminUserID, "nonexistent-id", 0) + require.Error(t, err) + + sErr, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, status.NotFound, sErr.Type()) +} + +func TestRegenerateUserInvite_PermissionDenied(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + // Create an invite first + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "user", + AutoGroups: []string{}, + } + + result, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.NoError(t, err) + + // Regular user should not be able to regenerate + _, err = am.RegenerateUserInvite(context.Background(), testAccountID, testRegularUserID, result.UserInfo.ID, 0) + require.Error(t, err) +} + +func TestDeleteUserInvite_Success(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + // Create an invite first + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "user", + AutoGroups: []string{}, + } + + result, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.NoError(t, err) + + // Delete the invite + err = am.DeleteUserInvite(context.Background(), testAccountID, testAdminUserID, result.UserInfo.ID) + require.NoError(t, err) + + // Verify invite is deleted + invites, err := am.Store.GetAccountUserInvites(context.Background(), store.LockingStrengthNone, testAccountID) + require.NoError(t, err) + assert.Len(t, invites, 0) + + // Verify token no longer works + _, err = am.GetUserInviteInfo(context.Background(), result.InviteToken) + require.Error(t, err) +} + +func TestDeleteUserInvite_NotFound(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + err := am.DeleteUserInvite(context.Background(), testAccountID, testAdminUserID, "nonexistent-id") + require.Error(t, err) + + sErr, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, status.NotFound, sErr.Type()) +} + +func TestDeleteUserInvite_PermissionDenied(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + // Create an invite first + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "user", + AutoGroups: []string{}, + } + + result, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.NoError(t, err) + + // Regular user should not be able to delete + err = am.DeleteUserInvite(context.Background(), testAccountID, testRegularUserID, result.UserInfo.ID) + require.Error(t, err) +} + +func TestDeleteUserInvite_WrongAccount(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + // Create an invite + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "user", + AutoGroups: []string{}, + } + + result, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.NoError(t, err) + + // Create another account + anotherAccountID := "anotherAccountID" + anotherAdminID := "anotherAdminID" + anotherAccount := newAccountWithId(context.Background(), anotherAccountID, anotherAdminID, "", "otheradmin@test.com", "Other Admin", false) + err = am.Store.SaveAccount(context.Background(), anotherAccount) + require.NoError(t, err) + + // Try to delete from wrong account + err = am.DeleteUserInvite(context.Background(), anotherAccountID, anotherAdminID, result.UserInfo.ID) + require.Error(t, err) + + sErr, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, status.NotFound, sErr.Type()) +} + +func TestAcceptUserInvite_Success(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + // Create an invite + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "user", + AutoGroups: []string{}, + } + + result, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.NoError(t, err) + + // Accept the invite with a valid password + err = am.AcceptUserInvite(context.Background(), result.InviteToken, "Password1!") + require.NoError(t, err) + + // Verify user is created in DB + users, err := am.Store.GetAccountUsers(context.Background(), store.LockingStrengthNone, testAccountID) + require.NoError(t, err) + + var foundUser *types.User + for _, u := range users { + if u.Email == "newuser@test.com" { + foundUser = u + break + } + } + require.NotNil(t, foundUser, "User should be created in DB") + assert.Equal(t, "New User", foundUser.Name) + assert.Equal(t, types.UserRoleUser, foundUser.Role) + + // Verify invite is deleted + invites, err := am.Store.GetAccountUserInvites(context.Background(), store.LockingStrengthNone, testAccountID) + require.NoError(t, err) + assert.Len(t, invites, 0) +} + +func TestAcceptUserInvite_InvalidToken(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + err := am.AcceptUserInvite(context.Background(), "invalid_token", "Password1!") + require.Error(t, err) + + sErr, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, status.InvalidArgument, sErr.Type()) +} + +func TestAcceptUserInvite_TokenNotFound(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + // Generate a valid token format that doesn't exist in DB + _, validToken, err := types.GenerateInviteToken() + require.NoError(t, err) + + err = am.AcceptUserInvite(context.Background(), validToken, "Password1!") + require.Error(t, err) + + sErr, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, status.NotFound, sErr.Type()) +} + +func TestAcceptUserInvite_ExpiredToken(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + // Create an invite with valid expiration + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "user", + AutoGroups: []string{}, + } + + result, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.NoError(t, err) + + // Manually set the invite to expired by updating the store directly + inviteRecord, err := am.Store.GetUserInviteByID(context.Background(), store.LockingStrengthUpdate, testAccountID, result.UserInfo.ID) + require.NoError(t, err) + inviteRecord.ExpiresAt = time.Now().Add(-time.Hour) // Set to 1 hour ago + err = am.Store.SaveUserInvite(context.Background(), inviteRecord) + require.NoError(t, err) + + err = am.AcceptUserInvite(context.Background(), result.InviteToken, "Password1!") + require.Error(t, err) + + sErr, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, status.InvalidArgument, sErr.Type()) + assert.Contains(t, err.Error(), "expired") +} + +func TestAcceptUserInvite_EmptyPassword(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + // Create an invite + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "user", + AutoGroups: []string{}, + } + + result, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.NoError(t, err) + + err = am.AcceptUserInvite(context.Background(), result.InviteToken, "") + require.Error(t, err) + + sErr, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, status.InvalidArgument, sErr.Type()) + assert.Contains(t, err.Error(), "password is required") +} + +func TestAcceptUserInvite_WeakPassword(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + // Create an invite + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "user", + AutoGroups: []string{}, + } + + result, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.NoError(t, err) + + testCases := []struct { + name string + password string + expectedMsg string + }{ + {"too short", "Pass1!", "at least 8 characters"}, + {"no digit", "Password!", "one digit"}, + {"no uppercase", "password1!", "one uppercase"}, + {"no special", "Password1", "one special character"}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := am.AcceptUserInvite(context.Background(), result.InviteToken, tc.password) + require.Error(t, err) + assert.Contains(t, err.Error(), tc.expectedMsg) + }) + } +} + +func TestValidatePassword(t *testing.T) { + testCases := []struct { + name string + password string + expectError bool + errorMsg string + }{ + {"valid password", "Password1!", false, ""}, + {"valid complex password", "MyP@ssw0rd#2024", false, ""}, + {"too short", "Pass1!", true, "at least 8 characters"}, + {"no digit", "Password!", true, "one digit"}, + {"no uppercase", "password1!", true, "one uppercase"}, + {"no special", "Password1", true, "one special character"}, + {"only lowercase", "password", true, "one digit"}, + {"no uppercase no special", "password1", true, "one uppercase"}, + {"all lowercase short", "pass", true, "at least 8 characters"}, + {"empty", "", true, "at least 8 characters"}, + {"spaces count as special", "Pass word1", false, ""}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := validatePassword(tc.password) + if tc.expectError { + require.Error(t, err) + assert.Contains(t, err.Error(), tc.errorMsg) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestInviteToken_GenerateAndValidate(t *testing.T) { + hashedToken, plainToken, err := types.GenerateInviteToken() + require.NoError(t, err) + require.NotEmpty(t, hashedToken) + require.NotEmpty(t, plainToken) + + // Validate token format + assert.Len(t, plainToken, types.InviteTokenLength) + assert.True(t, len(plainToken) > len(types.InviteTokenPrefix)) + assert.Equal(t, types.InviteTokenPrefix, plainToken[:len(types.InviteTokenPrefix)]) + + // Validate checksum + err = types.ValidateInviteToken(plainToken) + require.NoError(t, err) + + // Verify hashing is consistent + hashedAgain := types.HashInviteToken(plainToken) + assert.Equal(t, hashedToken, hashedAgain) +} + +func TestInviteToken_ValidateInvalid(t *testing.T) { + testCases := []struct { + name string + token string + }{ + {"empty", ""}, + {"too short", "nbi_abc"}, + {"wrong prefix", "xyz_123456789012345678901234567890"}, + {"invalid checksum", "nbi_123456789012345678901234567890abcdef"}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := types.ValidateInviteToken(tc.token) + require.Error(t, err) + }) + } +} + +func TestUserInviteRecord_IsExpired(t *testing.T) { + // Not expired + invite := &types.UserInviteRecord{ + ExpiresAt: time.Now().Add(time.Hour), + } + assert.False(t, invite.IsExpired()) + + // Expired + invite = &types.UserInviteRecord{ + ExpiresAt: time.Now().Add(-time.Hour), + } + assert.True(t, invite.IsExpired()) +} + +func TestUserInviteRecord_Copy(t *testing.T) { + original := &types.UserInviteRecord{ + ID: "invite-id", + AccountID: "account-id", + Email: "test@example.com", + Name: "Test User", + Role: "user", + AutoGroups: []string{"group1", "group2"}, + HashedToken: "hashed-token", + ExpiresAt: time.Now().Add(time.Hour), + CreatedAt: time.Now(), + CreatedBy: "creator-id", + } + + copied := original.Copy() + + assert.Equal(t, original.ID, copied.ID) + assert.Equal(t, original.AccountID, copied.AccountID) + assert.Equal(t, original.Email, copied.Email) + assert.Equal(t, original.Name, copied.Name) + assert.Equal(t, original.Role, copied.Role) + assert.Equal(t, original.AutoGroups, copied.AutoGroups) + assert.Equal(t, original.HashedToken, copied.HashedToken) + assert.Equal(t, original.ExpiresAt, copied.ExpiresAt) + assert.Equal(t, original.CreatedAt, copied.CreatedAt) + assert.Equal(t, original.CreatedBy, copied.CreatedBy) + + // Verify deep copy of AutoGroups + copied.AutoGroups[0] = "modified" + assert.NotEqual(t, original.AutoGroups[0], copied.AutoGroups[0]) +} + +func TestCreateUserInvite_NonEmbeddedIdP(t *testing.T) { + s, cleanup, err := store.NewTestStoreFromSQL(context.Background(), "", t.TempDir()) + require.NoError(t, err) + defer cleanup() + + account := newAccountWithId(context.Background(), testAccountID, testAdminUserID, "", "admin@test.com", "Admin User", false) + err = s.SaveAccount(context.Background(), account) + require.NoError(t, err) + + permissionsManager := permissions.NewManager(s) + + // Use nil IDP manager (non-embedded) + am := DefaultAccountManager{ + Store: s, + eventStore: &activity.InMemoryEventStore{}, + permissionsManager: permissionsManager, + idpManager: nil, + } + + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "user", + AutoGroups: []string{}, + } + + _, err = am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.Error(t, err) + + sErr, ok := status.FromError(err) + require.True(t, ok) + assert.Equal(t, status.PreconditionFailed, sErr.Type()) + assert.Contains(t, err.Error(), "embedded identity provider") +} + +func TestAcceptUserInvite_WithAutoGroups(t *testing.T) { + am, cleanup := setupInviteTestManagerWithEmbeddedIdP(t) + defer cleanup() + + // Create an invite with auto groups + invite := &types.UserInfo{ + Email: "newuser@test.com", + Name: "New User", + Role: "admin", + AutoGroups: []string{"group1", "group2"}, + } + + result, err := am.CreateUserInvite(context.Background(), testAccountID, testAdminUserID, invite, 0) + require.NoError(t, err) + + // Accept the invite + err = am.AcceptUserInvite(context.Background(), result.InviteToken, "Password1!") + require.NoError(t, err) + + // Verify user has the auto groups and role + users, err := am.Store.GetAccountUsers(context.Background(), store.LockingStrengthNone, testAccountID) + require.NoError(t, err) + + var foundUser *types.User + for _, u := range users { + if u.Email == "newuser@test.com" { + foundUser = u + break + } + } + require.NotNil(t, foundUser) + assert.Equal(t, types.UserRoleAdmin, foundUser.Role) + assert.Equal(t, []string{"group1", "group2"}, foundUser.AutoGroups) +} + +func TestUserInvite_EncryptDecryptSensitiveData(t *testing.T) { + key, err := crypt.GenerateKey() + require.NoError(t, err) + fieldEncrypt, err := crypt.NewFieldEncrypt(key) + require.NoError(t, err) + + t.Run("encrypt and decrypt", func(t *testing.T) { + invite := &types.UserInviteRecord{ + ID: "test-invite", + AccountID: "test-account", + Email: "test@example.com", + Name: "Test User", + Role: "user", + } + + // Encrypt + err := invite.EncryptSensitiveData(fieldEncrypt) + require.NoError(t, err) + + // Verify encrypted values are different from original + assert.NotEqual(t, "test@example.com", invite.Email) + assert.NotEqual(t, "Test User", invite.Name) + + // Decrypt + err = invite.DecryptSensitiveData(fieldEncrypt) + require.NoError(t, err) + + // Verify decrypted values match original + assert.Equal(t, "test@example.com", invite.Email) + assert.Equal(t, "Test User", invite.Name) + }) + + t.Run("encrypt empty fields", func(t *testing.T) { + invite := &types.UserInviteRecord{ + ID: "test-invite", + AccountID: "test-account", + Email: "", + Name: "", + Role: "user", + } + + // Encrypt empty fields + err := invite.EncryptSensitiveData(fieldEncrypt) + require.NoError(t, err) + + // Empty strings should remain empty + assert.Equal(t, "", invite.Email) + assert.Equal(t, "", invite.Name) + + // Decrypt empty fields + err = invite.DecryptSensitiveData(fieldEncrypt) + require.NoError(t, err) + + // Should still be empty + assert.Equal(t, "", invite.Email) + assert.Equal(t, "", invite.Name) + }) + + t.Run("nil encryptor", func(t *testing.T) { + invite := &types.UserInviteRecord{ + ID: "test-invite", + AccountID: "test-account", + Email: "test@example.com", + Name: "Test User", + Role: "user", + } + + // Encrypt with nil encryptor should be no-op + err := invite.EncryptSensitiveData(nil) + require.NoError(t, err) + assert.Equal(t, "test@example.com", invite.Email) + assert.Equal(t, "Test User", invite.Name) + + // Decrypt with nil encryptor should be no-op + err = invite.DecryptSensitiveData(nil) + require.NoError(t, err) + assert.Equal(t, "test@example.com", invite.Email) + assert.Equal(t, "Test User", invite.Name) + }) +} diff --git a/shared/auth/jwt/validator.go b/shared/auth/jwt/validator.go index ede7acea5..aeaa5842c 100644 --- a/shared/auth/jwt/validator.go +++ b/shared/auth/jwt/validator.go @@ -72,8 +72,8 @@ var ( func NewValidator(issuer string, audienceList []string, keysLocation string, idpSignkeyRefreshEnabled bool) *Validator { keys, err := getPemKeys(keysLocation) - if err != nil { - log.WithField("keysLocation", keysLocation).Errorf("could not get keys from location: %s", err) + if err != nil && !strings.Contains(keysLocation, "localhost") { + log.WithField("keysLocation", keysLocation).Warnf("could not get keys from location: %s, it will try again on the next http request", err) } return &Validator{ diff --git a/shared/management/client/client.go b/shared/management/client/client.go index 3126bcd1f..b92c636c5 100644 --- a/shared/management/client/client.go +++ b/shared/management/client/client.go @@ -14,6 +14,7 @@ import ( type Client interface { io.Closer Sync(ctx context.Context, sysInfo *system.Info, msgHandler func(msg *proto.SyncResponse) error) error + Job(ctx context.Context, msgHandler func(msg *proto.JobRequest) *proto.JobResponse) error GetServerPublicKey() (*wgtypes.Key, error) Register(serverKey wgtypes.Key, setupKey string, jwtToken string, sysInfo *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) Login(serverKey wgtypes.Key, sysInfo *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) diff --git a/shared/management/client/client_test.go b/shared/management/client/client_test.go index 64f6831f2..a11f863a7 100644 --- a/shared/management/client/client_test.go +++ b/shared/management/client/client_test.go @@ -18,12 +18,13 @@ import ( "google.golang.org/grpc/status" "github.com/netbirdio/management-integrations/integrations" + ephemeral_manager "github.com/netbirdio/netbird/management/internals/modules/peers/ephemeral/manager" "github.com/netbirdio/netbird/management/internals/controllers/network_map/controller" "github.com/netbirdio/netbird/management/internals/controllers/network_map/update_channel" "github.com/netbirdio/netbird/management/internals/modules/peers" - "github.com/netbirdio/netbird/management/internals/modules/peers/ephemeral/manager" nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc" + "github.com/netbirdio/netbird/management/server/job" "github.com/netbirdio/netbird/client/system" "github.com/netbirdio/netbird/encryption" @@ -92,6 +93,7 @@ func startManagement(t *testing.T) (*grpc.Server, net.Listener) { peersManger := peers.NewManager(store, permissionsManagerMock) settingsManagerMock := settings.NewMockManager(ctrl) + jobManager := job.NewJobManager(nil, store, peersManger) ia, _ := integrations.NewIntegratedValidator(context.Background(), peersManger, settingsManagerMock, eventStore) @@ -117,8 +119,8 @@ func startManagement(t *testing.T) (*grpc.Server, net.Listener) { ctx := context.Background() updateManager := update_channel.NewPeersUpdateManager(metrics) requestBuffer := mgmt.NewAccountRequestBuffer(ctx, store) - networkMapController := controller.NewController(ctx, store, metrics, updateManager, requestBuffer, mgmt.MockIntegratedValidator{}, settingsMockManager, "netbird.selfhosted", port_forwarding.NewControllerMock(), manager.NewEphemeralManager(store, peersManger), config) - accountManager, err := mgmt.BuildManager(context.Background(), config, store, networkMapController, nil, "", eventStore, nil, false, ia, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManagerMock, false) + networkMapController := controller.NewController(ctx, store, metrics, updateManager, requestBuffer, mgmt.MockIntegratedValidator{}, settingsMockManager, "netbird.selfhosted", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(store, peersManger), config) + accountManager, err := mgmt.BuildManager(context.Background(), config, store, networkMapController, jobManager, nil, "", eventStore, nil, false, ia, metrics, port_forwarding.NewControllerMock(), settingsMockManager, permissionsManagerMock, false) if err != nil { t.Fatal(err) } @@ -129,7 +131,7 @@ func startManagement(t *testing.T) (*grpc.Server, net.Listener) { if err != nil { t.Fatal(err) } - mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, secretsManager, nil, nil, mgmt.MockIntegratedValidator{}, networkMapController, nil) + mgmtServer, err := nbgrpc.NewServer(config, accountManager, settingsMockManager, jobManager, secretsManager, nil, nil, mgmt.MockIntegratedValidator{}, networkMapController, nil) if err != nil { t.Fatal(err) } diff --git a/shared/management/client/grpc.go b/shared/management/client/grpc.go index 89860ac9b..d54c8f870 100644 --- a/shared/management/client/grpc.go +++ b/shared/management/client/grpc.go @@ -12,6 +12,7 @@ import ( gstatus "google.golang.org/grpc/status" "github.com/cenkalti/backoff/v4" + "github.com/google/uuid" log "github.com/sirupsen/logrus" "golang.zx2c4.com/wireguard/wgctrl/wgtypes" "google.golang.org/grpc" @@ -111,8 +112,26 @@ func (c *GrpcClient) ready() bool { // Sync wraps the real client's Sync endpoint call and takes care of retries and encryption/decryption of messages // Blocking request. The result will be sent via msgHandler callback function func (c *GrpcClient) Sync(ctx context.Context, sysInfo *system.Info, msgHandler func(msg *proto.SyncResponse) error) error { - backOff := defaultBackoff(ctx) + return c.withMgmtStream(ctx, func(ctx context.Context, serverPubKey wgtypes.Key) error { + return c.handleSyncStream(ctx, serverPubKey, sysInfo, msgHandler) + }) +} +// Job wraps the real client's Job endpoint call and takes care of retries and encryption/decryption of messages +// Blocking request. The result will be sent via msgHandler callback function +func (c *GrpcClient) Job(ctx context.Context, msgHandler func(msg *proto.JobRequest) *proto.JobResponse) error { + return c.withMgmtStream(ctx, func(ctx context.Context, serverPubKey wgtypes.Key) error { + return c.handleJobStream(ctx, serverPubKey, msgHandler) + }) +} + +// withMgmtStream runs a streaming operation against the ManagementService +// It takes care of retries, connection readiness, and fetching server public key. +func (c *GrpcClient) withMgmtStream( + ctx context.Context, + handler func(ctx context.Context, serverPubKey wgtypes.Key) error, +) error { + backOff := defaultBackoff(ctx) operation := func() error { log.Debugf("management connection state %v", c.conn.GetState()) connState := c.conn.GetState() @@ -130,7 +149,7 @@ func (c *GrpcClient) Sync(ctx context.Context, sysInfo *system.Info, msgHandler return err } - return c.handleStream(ctx, *serverPubKey, sysInfo, msgHandler, backOff) + return handler(ctx, *serverPubKey) } err := backoff.Retry(operation, backOff) @@ -141,12 +160,153 @@ func (c *GrpcClient) Sync(ctx context.Context, sysInfo *system.Info, msgHandler return err } -func (c *GrpcClient) handleStream(ctx context.Context, serverPubKey wgtypes.Key, sysInfo *system.Info, - msgHandler func(msg *proto.SyncResponse) error, backOff backoff.BackOff) error { +func (c *GrpcClient) handleJobStream( + ctx context.Context, + serverPubKey wgtypes.Key, + msgHandler func(msg *proto.JobRequest) *proto.JobResponse, +) error { ctx, cancelStream := context.WithCancel(ctx) defer cancelStream() - stream, err := c.connectToStream(ctx, serverPubKey, sysInfo) + stream, err := c.realClient.Job(ctx) + if err != nil { + log.Errorf("failed to open job stream: %v", err) + return err + } + + // Handshake with the server + if err := c.sendHandshake(ctx, stream, serverPubKey); err != nil { + return err + } + + log.Debug("job stream handshake sent successfully") + + // Main loop: receive, process, respond + for { + jobReq, err := c.receiveJobRequest(ctx, stream, serverPubKey) + if err != nil { + if s, ok := gstatus.FromError(err); ok { + switch s.Code() { + case codes.PermissionDenied: + c.notifyDisconnected(err) + return backoff.Permanent(err) // unrecoverable error, propagate to the upper layer + case codes.Canceled: + log.Debugf("management connection context has been canceled, this usually indicates shutdown") + return err + case codes.Unimplemented: + log.Warn("Job feature is not supported by the current management server version. " + + "Please update the management service to use this feature.") + return nil + default: + c.notifyDisconnected(err) + log.Warnf("disconnected from the Management service but will retry silently. Reason: %v", err) + return err + } + } else { + // non-gRPC error + c.notifyDisconnected(err) + log.Warnf("disconnected from the Management service but will retry silently. Reason: %v", err) + return err + } + } + + if jobReq == nil || len(jobReq.ID) == 0 { + log.Debug("received unknown or empty job request, skipping") + continue + } + + log.Infof("received a new job from the management server (ID: %s)", jobReq.ID) + jobResp := c.processJobRequest(ctx, jobReq, msgHandler) + if err := c.sendJobResponse(ctx, stream, serverPubKey, jobResp); err != nil { + return err + } + } +} + +// sendHandshake sends the initial handshake message +func (c *GrpcClient) sendHandshake(ctx context.Context, stream proto.ManagementService_JobClient, serverPubKey wgtypes.Key) error { + handshakeReq := &proto.JobRequest{ + ID: []byte(uuid.New().String()), + } + encHello, err := encryption.EncryptMessage(serverPubKey, c.key, handshakeReq) + if err != nil { + log.Errorf("failed to encrypt handshake message: %v", err) + return err + } + return stream.Send(&proto.EncryptedMessage{ + WgPubKey: c.key.PublicKey().String(), + Body: encHello, + }) +} + +// receiveJobRequest waits for and decrypts a job request +func (c *GrpcClient) receiveJobRequest( + ctx context.Context, + stream proto.ManagementService_JobClient, + serverPubKey wgtypes.Key, +) (*proto.JobRequest, error) { + encryptedMsg, err := stream.Recv() + if err != nil { + return nil, err + } + + jobReq := &proto.JobRequest{} + if err := encryption.DecryptMessage(serverPubKey, c.key, encryptedMsg.Body, jobReq); err != nil { + log.Warnf("failed to decrypt job request: %v", err) + return nil, err + } + + return jobReq, nil +} + +// processJobRequest executes the handler and ensures a valid response +func (c *GrpcClient) processJobRequest( + ctx context.Context, + jobReq *proto.JobRequest, + msgHandler func(msg *proto.JobRequest) *proto.JobResponse, +) *proto.JobResponse { + jobResp := msgHandler(jobReq) + if jobResp == nil { + jobResp = &proto.JobResponse{ + ID: jobReq.ID, + Status: proto.JobStatus_failed, + Reason: []byte("handler returned nil response"), + } + log.Warnf("job handler returned nil for job %s", string(jobReq.ID)) + } + return jobResp +} + +// sendJobResponse encrypts and sends a job response +func (c *GrpcClient) sendJobResponse( + ctx context.Context, + stream proto.ManagementService_JobClient, + serverPubKey wgtypes.Key, + resp *proto.JobResponse, +) error { + encResp, err := encryption.EncryptMessage(serverPubKey, c.key, resp) + if err != nil { + log.Errorf("failed to encrypt job response for job %s: %v", string(resp.ID), err) + return err + } + + if err := stream.Send(&proto.EncryptedMessage{ + WgPubKey: c.key.PublicKey().String(), + Body: encResp, + }); err != nil { + log.Errorf("failed to send job response for job %s: %v", string(resp.ID), err) + return err + } + + log.Infof("job response sent for job %s (status: %s)", string(resp.ID), resp.Status.String()) + return nil +} + +func (c *GrpcClient) handleSyncStream(ctx context.Context, serverPubKey wgtypes.Key, sysInfo *system.Info, msgHandler func(msg *proto.SyncResponse) error) error { + ctx, cancelStream := context.WithCancel(ctx) + defer cancelStream() + + stream, err := c.connectToSyncStream(ctx, serverPubKey, sysInfo) if err != nil { log.Debugf("failed to open Management Service stream: %s", err) if s, ok := gstatus.FromError(err); ok && s.Code() == codes.PermissionDenied { @@ -159,20 +319,22 @@ func (c *GrpcClient) handleStream(ctx context.Context, serverPubKey wgtypes.Key, c.notifyConnected() // blocking until error - err = c.receiveEvents(stream, serverPubKey, msgHandler) - // we need this reset because after a successful connection and a consequent error, backoff lib doesn't - // reset times and next try will start with a long delay - backOff.Reset() + err = c.receiveUpdatesEvents(stream, serverPubKey, msgHandler) if err != nil { c.notifyDisconnected(err) - s, _ := gstatus.FromError(err) - switch s.Code() { - case codes.PermissionDenied: - return backoff.Permanent(err) // unrecoverable error, propagate to the upper layer - case codes.Canceled: - log.Debugf("management connection context has been canceled, this usually indicates shutdown") - return nil - default: + if s, ok := gstatus.FromError(err); ok { + switch s.Code() { + case codes.PermissionDenied: + return backoff.Permanent(err) // unrecoverable error, propagate to the upper layer + case codes.Canceled: + log.Debugf("management connection context has been canceled, this usually indicates shutdown") + return nil + default: + log.Warnf("disconnected from the Management service but will retry silently. Reason: %v", err) + return err + } + } else { + // non-gRPC error log.Warnf("disconnected from the Management service but will retry silently. Reason: %v", err) return err } @@ -191,7 +353,7 @@ func (c *GrpcClient) GetNetworkMap(sysInfo *system.Info) (*proto.NetworkMap, err ctx, cancelStream := context.WithCancel(c.ctx) defer cancelStream() - stream, err := c.connectToStream(ctx, *serverPubKey, sysInfo) + stream, err := c.connectToSyncStream(ctx, *serverPubKey, sysInfo) if err != nil { log.Debugf("failed to open Management Service stream: %s", err) return nil, err @@ -224,7 +386,7 @@ func (c *GrpcClient) GetNetworkMap(sysInfo *system.Info) (*proto.NetworkMap, err return decryptedResp.GetNetworkMap(), nil } -func (c *GrpcClient) connectToStream(ctx context.Context, serverPubKey wgtypes.Key, sysInfo *system.Info) (proto.ManagementService_SyncClient, error) { +func (c *GrpcClient) connectToSyncStream(ctx context.Context, serverPubKey wgtypes.Key, sysInfo *system.Info) (proto.ManagementService_SyncClient, error) { req := &proto.SyncRequest{Meta: infoToMetaData(sysInfo)} myPrivateKey := c.key @@ -243,7 +405,7 @@ func (c *GrpcClient) connectToStream(ctx context.Context, serverPubKey wgtypes.K return sync, nil } -func (c *GrpcClient) receiveEvents(stream proto.ManagementService_SyncClient, serverPubKey wgtypes.Key, msgHandler func(msg *proto.SyncResponse) error) error { +func (c *GrpcClient) receiveUpdatesEvents(stream proto.ManagementService_SyncClient, serverPubKey wgtypes.Key, msgHandler func(msg *proto.SyncResponse) error) error { for { update, err := stream.Recv() if err == io.EOF { diff --git a/shared/management/client/mock.go b/shared/management/client/mock.go index 29006c9c3..ac96f7b36 100644 --- a/shared/management/client/mock.go +++ b/shared/management/client/mock.go @@ -20,6 +20,7 @@ type MockClient struct { GetPKCEAuthorizationFlowFunc func(serverKey wgtypes.Key) (*proto.PKCEAuthorizationFlow, error) SyncMetaFunc func(sysInfo *system.Info) error LogoutFunc func() error + JobFunc func(ctx context.Context, msgHandler func(msg *proto.JobRequest) *proto.JobResponse) error } func (m *MockClient) IsHealthy() bool { @@ -40,6 +41,13 @@ func (m *MockClient) Sync(ctx context.Context, sysInfo *system.Info, msgHandler return m.SyncFunc(ctx, sysInfo, msgHandler) } +func (m *MockClient) Job(ctx context.Context, msgHandler func(msg *proto.JobRequest) *proto.JobResponse) error { + if m.JobFunc == nil { + return nil + } + return m.JobFunc(ctx, msgHandler) +} + func (m *MockClient) GetServerPublicKey() (*wgtypes.Key, error) { if m.GetServerPublicKeyFunc == nil { return nil, nil diff --git a/shared/management/http/api/generate.sh b/shared/management/http/api/generate.sh index 2f24fd903..3770ea90f 100755 --- a/shared/management/http/api/generate.sh +++ b/shared/management/http/api/generate.sh @@ -11,6 +11,6 @@ fi old_pwd=$(pwd) script_path=$(dirname $(realpath "$0")) cd "$script_path" -go install github.com/deepmap/oapi-codegen/cmd/oapi-codegen@4a1477f6a8ba6ca8115cc23bb2fb67f0b9fca18e +go install github.com/oapi-codegen/oapi-codegen/v2/cmd/oapi-codegen@latest oapi-codegen --config cfg.yaml openapi.yml -cd "$old_pwd" \ No newline at end of file +cd "$old_pwd" diff --git a/shared/management/http/api/openapi.yml b/shared/management/http/api/openapi.yml index a61281ab6..192caee5a 100644 --- a/shared/management/http/api/openapi.yml +++ b/shared/management/http/api/openapi.yml @@ -40,8 +40,142 @@ tags: description: Interact with and view information about reverse proxies. - name: Instance description: Instance setup and status endpoints for initial configuration. + - name: Jobs + description: Interact with and view information about remote jobs. + x-experimental: true + components: schemas: + PasswordChangeRequest: + type: object + properties: + old_password: + description: The current password + type: string + example: "currentPassword123" + new_password: + description: The new password to set + type: string + example: "newSecurePassword456" + required: + - old_password + - new_password + WorkloadType: + type: string + description: | + Identifies the type of workload the job will execute. + Currently only `"bundle"` is supported. + enum: + - bundle + example: "bundle" + BundleParameters: + type: object + description: These parameters control what gets included in the bundle and how it is processed. + properties: + bundle_for: + type: boolean + description: Whether to generate a bundle for the given timeframe. + example: true + bundle_for_time: + type: integer + minimum: 1 + maximum: 5 + description: Time period in minutes for which to generate the bundle. + example: 2 + log_file_count: + type: integer + minimum: 1 + maximum: 1000 + description: Maximum number of log files to include in the bundle. + example: 100 + anonymize: + type: boolean + description: Whether sensitive data should be anonymized in the bundle. + example: false + required: + - bundle_for + - bundle_for_time + - log_file_count + - anonymize + BundleResult: + type: object + properties: + upload_key: + type: string + example: "upload_key_123" + nullable: true + BundleWorkloadRequest: + type: object + properties: + type: + $ref: '#/components/schemas/WorkloadType' + parameters: + $ref: '#/components/schemas/BundleParameters' + required: + - type + - parameters + BundleWorkloadResponse: + type: object + properties: + type: + $ref: '#/components/schemas/WorkloadType' + parameters: + $ref: '#/components/schemas/BundleParameters' + result: + $ref: '#/components/schemas/BundleResult' + required: + - type + - parameters + - result + WorkloadRequest: + oneOf: + - $ref: '#/components/schemas/BundleWorkloadRequest' + discriminator: + propertyName: type + mapping: + bundle: '#/components/schemas/BundleWorkloadRequest' + WorkloadResponse: + oneOf: + - $ref: '#/components/schemas/BundleWorkloadResponse' + discriminator: + propertyName: type + mapping: + bundle: '#/components/schemas/BundleWorkloadResponse' + JobRequest: + type: object + properties: + workload: + $ref: '#/components/schemas/WorkloadRequest' + required: + - workload + JobResponse: + type: object + properties: + id: + type: string + created_at: + type: string + format: date-time + completed_at: + type: string + format: date-time + nullable: true + triggered_by: + type: string + status: + type: string + enum: [pending, succeeded, failed] + failed_reason: + type: string + nullable: true + workload: + $ref: '#/components/schemas/WorkloadResponse' + required: + - id + - created_at + - status + - triggered_by + - workload Account: type: object properties: @@ -356,6 +490,171 @@ components: - role - auto_groups - is_service_user + UserInviteCreateRequest: + type: object + description: Request to create a user invite link + properties: + email: + description: User's email address + type: string + example: user@example.com + name: + description: User's full name + type: string + example: John Doe + role: + description: User's NetBird account role + type: string + example: user + auto_groups: + description: Group IDs to auto-assign to peers registered by this user + type: array + items: + type: string + example: ch8i4ug6lnn4g9hqv7m0 + expires_in: + description: Invite expiration time in seconds (default 72 hours) + type: integer + example: 259200 + required: + - email + - name + - role + - auto_groups + UserInvite: + type: object + description: A user invite + properties: + id: + description: Invite ID + type: string + example: d5p7eedra0h0lt6f59hg + email: + description: User's email address + type: string + example: user@example.com + name: + description: User's full name + type: string + example: John Doe + role: + description: User's NetBird account role + type: string + example: user + auto_groups: + description: Group IDs to auto-assign to peers registered by this user + type: array + items: + type: string + example: ch8i4ug6lnn4g9hqv7m0 + expires_at: + description: Invite expiration time + type: string + format: date-time + example: "2024-01-25T10:00:00Z" + created_at: + description: Invite creation time + type: string + format: date-time + example: "2024-01-22T10:00:00Z" + expired: + description: Whether the invite has expired + type: boolean + example: false + invite_token: + description: The invite link to be shared with the user. Only returned when the invite is created or regenerated. + type: string + example: nbi_Xk5Lz9mP2vQwRtYu1aN3bC4dE5fGh0ABC123 + required: + - id + - email + - name + - role + - auto_groups + - expires_at + - created_at + - expired + UserInviteInfo: + type: object + description: Public information about an invite + properties: + email: + description: User's email address + type: string + example: user@example.com + name: + description: User's full name + type: string + example: John Doe + expires_at: + description: Invite expiration time + type: string + format: date-time + example: "2024-01-25T10:00:00Z" + valid: + description: Whether the invite is still valid (not expired) + type: boolean + example: true + invited_by: + description: Name of the user who sent the invite + type: string + example: Admin User + required: + - email + - name + - expires_at + - valid + - invited_by + UserInviteAcceptRequest: + type: object + description: Request to accept an invite and set password + properties: + password: + description: >- + The password the user wants to set. Must be at least 8 characters long + and contain at least one uppercase letter, one digit, and one special + character (any character that is not a letter or digit, including spaces). + type: string + format: password + minLength: 8 + pattern: '^(?=.*[0-9])(?=.*[A-Z])(?=.*[^a-zA-Z0-9]).{8,}$' + example: SecurePass123! + required: + - password + UserInviteAcceptResponse: + type: object + description: Response after accepting an invite + properties: + success: + description: Whether the invite was accepted successfully + type: boolean + example: true + required: + - success + UserInviteRegenerateRequest: + type: object + description: Request to regenerate an invite link + properties: + expires_in: + description: Invite expiration time in seconds (default 72 hours) + type: integer + example: 259200 + UserInviteRegenerateResponse: + type: object + description: Response after regenerating an invite + properties: + invite_token: + description: The new invite token + type: string + example: nbi_Xk5Lz9mP2vQwRtYu1aN3bC4dE5fGh0ABC123 + invite_expires_at: + description: New invite expiration time + type: string + format: date-time + example: "2024-01-28T10:00:00Z" + required: + - invite_token + - invite_expires_at PeerMinimum: type: object properties: @@ -1896,19 +2195,53 @@ components: activity_code: description: The string code of the activity that occurred during the event type: string - enum: [ "user.peer.delete", "user.join", "user.invite", "user.peer.add", "user.group.add", "user.group.delete", - "user.role.update", "user.block", "user.unblock", "user.peer.login", - "setupkey.peer.add", "setupkey.add", "setupkey.update", "setupkey.revoke", "setupkey.overuse", - "setupkey.group.delete", "setupkey.group.add", - "rule.add", "rule.delete", "rule.update", - "policy.add", "policy.delete", "policy.update", - "group.add", "group.update", "dns.setting.disabled.management.group.add", "dns.setting.disabled.management.group.delete", - "account.create", "account.setting.peer.login.expiration.update", "account.setting.peer.login.expiration.disable", "account.setting.peer.login.expiration.enable", - "route.add", "route.delete", "route.update", - "nameserver.group.add", "nameserver.group.delete", "nameserver.group.update", - "peer.ssh.disable", "peer.ssh.enable", "peer.rename", "peer.login.expiration.disable", "peer.login.expiration.enable", "peer.login.expire", - "service.user.create", "personal.access.token.create", "service.user.delete", "personal.access.token.delete", - "service.create", "service.update", "service.delete" ] + enum: [ + "peer.user.add", "peer.setupkey.add", "user.join", "user.invite", "account.create", "account.delete", + "user.peer.delete", "rule.add", "rule.update", "rule.delete", + "policy.add", "policy.update", "policy.delete", + "setupkey.add", "setupkey.update", "setupkey.revoke", "setupkey.overuse", "setupkey.delete", + "group.add", "group.update", "group.delete", + "peer.group.add", "peer.group.delete", + "user.group.add", "user.group.delete", "user.role.update", + "setupkey.group.add", "setupkey.group.delete", + "dns.setting.disabled.management.group.add", "dns.setting.disabled.management.group.delete", + "route.add", "route.delete", "route.update", + "peer.ssh.enable", "peer.ssh.disable", "peer.rename", + "peer.login.expiration.enable", "peer.login.expiration.disable", + "nameserver.group.add", "nameserver.group.delete", "nameserver.group.update", + "account.setting.peer.login.expiration.update", "account.setting.peer.login.expiration.enable", "account.setting.peer.login.expiration.disable", + "personal.access.token.create", "personal.access.token.delete", + "service.user.create", "service.user.delete", + "user.block", "user.unblock", "user.delete", + "user.peer.login", "peer.login.expire", + "dashboard.login", + "integration.create", "integration.update", "integration.delete", + "account.setting.peer.approval.enable", "account.setting.peer.approval.disable", + "peer.approve", "peer.approval.revoke", + "transferred.owner.role", + "posture.check.create", "posture.check.update", "posture.check.delete", + "peer.inactivity.expiration.enable", "peer.inactivity.expiration.disable", + "account.peer.inactivity.expiration.enable", "account.peer.inactivity.expiration.disable", "account.peer.inactivity.expiration.update", + "account.setting.group.propagation.enable", "account.setting.group.propagation.disable", + "account.setting.routing.peer.dns.resolution.enable", "account.setting.routing.peer.dns.resolution.disable", + "network.create", "network.update", "network.delete", + "network.resource.create", "network.resource.update", "network.resource.delete", + "network.router.create", "network.router.update", "network.router.delete", + "resource.group.add", "resource.group.delete", + "account.dns.domain.update", + "account.setting.lazy.connection.enable", "account.setting.lazy.connection.disable", + "account.network.range.update", + "peer.ip.update", + "user.approve", "user.reject", "user.create", + "account.settings.auto.version.update", + "identityprovider.create", "identityprovider.update", "identityprovider.delete", + "dns.zone.create", "dns.zone.update", "dns.zone.delete", + "dns.zone.record.create", "dns.zone.record.update", "dns.zone.record.delete", + "peer.job.create", + "user.password.change", + "user.invite.link.create", "user.invite.link.accept", "user.invite.link.regenerate", "user.invite.link.delete", + "service.create", "service.update", "service.delete" + ] example: route.add initiator_id: description: The ID of the initiator of the event. E.g., an ID of a user that triggered the event. @@ -2628,6 +2961,29 @@ components: required: - user_id - email + InstanceVersionInfo: + type: object + description: Version information for NetBird components + properties: + management_current_version: + description: The current running version of the management server + type: string + example: "0.35.0" + dashboard_available_version: + description: The latest available version of the dashboard (from GitHub releases) + type: string + example: "2.10.0" + management_available_version: + description: The latest available version of the management server (from GitHub releases) + type: string + example: "0.35.0" + management_update_available: + description: Indicates if a newer management version is available + type: boolean + example: true + required: + - management_current_version + - management_update_available responses: not_found: description: Resource not found @@ -2680,6 +3036,27 @@ paths: $ref: '#/components/schemas/InstanceStatus' '500': "$ref": "#/components/responses/internal_error" + /api/instance/version: + get: + summary: Get Version Info + description: Returns version information for NetBird components including the current management server version and latest available versions from GitHub. + tags: [ Instance ] + security: + - BearerAuth: [] + - TokenAuth: [] + responses: + '200': + description: Version information + content: + application/json: + schema: + $ref: '#/components/schemas/InstanceVersionInfo' + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '500': + "$ref": "#/components/responses/internal_error" /api/setup: post: summary: Setup Instance @@ -2707,6 +3084,110 @@ paths: content: { } '500': "$ref": "#/components/responses/internal_error" + /api/peers/{peerId}/jobs: + get: + summary: List Jobs + description: Retrieve all jobs for a given peer + tags: [ Jobs ] + security: + - BearerAuth: [] + - TokenAuth: [] + parameters: + - in: path + name: peerId + description: The unique identifier of a peer + required: true + schema: + type: string + responses: + '200': + description: List of jobs + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/JobResponse' + '400': + $ref: '#/components/responses/bad_request' + '401': + $ref: '#/components/responses/requires_authentication' + '403': + $ref: '#/components/responses/forbidden' + '500': + $ref: '#/components/responses/internal_error' + post: + summary: Create Job + description: Create a new job for a given peer + tags: [ Jobs ] + security: + - BearerAuth: [] + - TokenAuth: [] + parameters: + - in: path + name: peerId + description: The unique identifier of a peer + required: true + schema: + type: string + requestBody: + description: Create job request + content: + application/json: + schema: + $ref: '#/components/schemas/JobRequest' + required: true + responses: + '201': + description: Job created + content: + application/json: + schema: + $ref: '#/components/schemas/JobResponse' + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '500': + "$ref": "#/components/responses/internal_error" + /api/peers/{peerId}/jobs/{jobId}: + get: + summary: Get Job + description: Retrieve details of a specific job + tags: [ Jobs ] + security: + - BearerAuth: [] + - TokenAuth: [] + parameters: + - in: path + name: peerId + required: true + description: The unique identifier of a peer + schema: + type: string + - in: path + name: jobId + required: true + description: The unique identifier of a job + schema: + type: string + responses: + '200': + description: A Job object + content: + application/json: + schema: + $ref: '#/components/schemas/JobResponse' + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '500': + "$ref": "#/components/responses/internal_error" /api/accounts: get: summary: List all Accounts @@ -3134,6 +3615,43 @@ paths: "$ref": "#/components/responses/forbidden" '500': "$ref": "#/components/responses/internal_error" + /api/users/{userId}/password: + put: + summary: Change user password + description: Change the password for a user. Only available when embedded IdP is enabled. Users can only change their own password. + tags: [ Users ] + security: + - BearerAuth: [ ] + - TokenAuth: [ ] + parameters: + - in: path + name: userId + required: true + schema: + type: string + description: The unique identifier of a user + requestBody: + description: Password change request + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/PasswordChangeRequest' + responses: + '200': + description: Password changed successfully + content: {} + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '412': + description: Precondition failed - embedded IdP is not enabled + content: { } + '500': + "$ref": "#/components/responses/internal_error" /api/users/current: get: summary: Retrieve current user @@ -3157,6 +3675,210 @@ paths: "$ref": "#/components/responses/forbidden" '500': "$ref": "#/components/responses/internal_error" + /api/users/invites: + get: + summary: List user invites + description: Lists all pending invites for the account. Only available when embedded IdP is enabled. + tags: [ Users ] + security: + - BearerAuth: [ ] + - TokenAuth: [ ] + responses: + '200': + description: List of invites + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/UserInvite' + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '412': + description: Precondition failed - embedded IdP is not enabled + content: { } + '500': + "$ref": "#/components/responses/internal_error" + post: + summary: Create a user invite + description: Creates an invite link for a new user. Only available when embedded IdP is enabled. The user is not created until they accept the invite. + tags: [ Users ] + security: + - BearerAuth: [ ] + - TokenAuth: [ ] + requestBody: + description: User invite information + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UserInviteCreateRequest' + responses: + '200': + description: Invite created successfully + content: + application/json: + schema: + $ref: '#/components/schemas/UserInvite' + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '409': + description: User or invite already exists + content: { } + '412': + description: Precondition failed - embedded IdP is not enabled + content: { } + '422': + "$ref": "#/components/responses/validation_failed" + '500': + "$ref": "#/components/responses/internal_error" + /api/users/invites/{inviteId}: + delete: + summary: Delete a user invite + description: Deletes a pending invite. Only available when embedded IdP is enabled. + tags: [ Users ] + security: + - BearerAuth: [ ] + - TokenAuth: [ ] + parameters: + - in: path + name: inviteId + required: true + schema: + type: string + description: The ID of the invite to delete + responses: + '200': + description: Invite deleted successfully + content: { } + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '404': + description: Invite not found + content: { } + '412': + description: Precondition failed - embedded IdP is not enabled + content: { } + '500': + "$ref": "#/components/responses/internal_error" + /api/users/invites/{inviteId}/regenerate: + post: + summary: Regenerate a user invite + description: Regenerates an invite link for an existing invite. Invalidates the previous token and creates a new one. + tags: [ Users ] + security: + - BearerAuth: [ ] + - TokenAuth: [ ] + parameters: + - in: path + name: inviteId + required: true + schema: + type: string + description: The ID of the invite to regenerate + requestBody: + description: Regenerate options + content: + application/json: + schema: + $ref: '#/components/schemas/UserInviteRegenerateRequest' + responses: + '200': + description: Invite regenerated successfully + content: + application/json: + schema: + $ref: '#/components/schemas/UserInviteRegenerateResponse' + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '404': + description: Invite not found + content: { } + '412': + description: Precondition failed - embedded IdP is not enabled + content: { } + '422': + "$ref": "#/components/responses/validation_failed" + '500': + "$ref": "#/components/responses/internal_error" + /api/users/invites/{token}: + get: + summary: Get invite information + description: Retrieves public information about an invite. This endpoint is unauthenticated and protected by the token itself. + tags: [ Users ] + security: [] + parameters: + - in: path + name: token + required: true + schema: + type: string + description: The invite token + responses: + '200': + description: Invite information + content: + application/json: + schema: + $ref: '#/components/schemas/UserInviteInfo' + '400': + "$ref": "#/components/responses/bad_request" + '404': + description: Invite not found or invalid token + content: { } + '500': + "$ref": "#/components/responses/internal_error" + /api/users/invites/{token}/accept: + post: + summary: Accept an invite + description: Accepts an invite and creates the user with the provided password. This endpoint is unauthenticated and protected by the token itself. + tags: [ Users ] + security: [] + parameters: + - in: path + name: token + required: true + schema: + type: string + description: The invite token + requestBody: + description: Password to set for the new user + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UserInviteAcceptRequest' + responses: + '200': + description: Invite accepted successfully + content: + application/json: + schema: + $ref: '#/components/schemas/UserInviteAcceptResponse' + '400': + "$ref": "#/components/responses/bad_request" + '404': + description: Invite not found or invalid token + content: { } + '412': + description: Precondition failed - embedded IdP is not enabled or invite expired + content: { } + '422': + "$ref": "#/components/responses/validation_failed" + '500': + "$ref": "#/components/responses/internal_error" /api/peers: get: summary: List all Peers diff --git a/shared/management/http/api/types.gen.go b/shared/management/http/api/types.gen.go index 6d95a8720..e8c044b32 100644 --- a/shared/management/http/api/types.gen.go +++ b/shared/management/http/api/types.gen.go @@ -1,10 +1,14 @@ // Package api provides primitives to interact with the openapi HTTP API. // -// Code generated by github.com/deepmap/oapi-codegen version v1.11.1-0.20220912230023-4a1477f6a8ba DO NOT EDIT. +// Code generated by github.com/oapi-codegen/oapi-codegen/v2 version v2.5.1 DO NOT EDIT. package api import ( + "encoding/json" + "errors" "time" + + "github.com/oapi-codegen/runtime" ) const ( @@ -21,56 +25,115 @@ const ( // Defines values for EventActivityCode. const ( - EventActivityCodeAccountCreate EventActivityCode = "account.create" - EventActivityCodeAccountSettingPeerLoginExpirationDisable EventActivityCode = "account.setting.peer.login.expiration.disable" - EventActivityCodeAccountSettingPeerLoginExpirationEnable EventActivityCode = "account.setting.peer.login.expiration.enable" - EventActivityCodeAccountSettingPeerLoginExpirationUpdate EventActivityCode = "account.setting.peer.login.expiration.update" - EventActivityCodeDnsSettingDisabledManagementGroupAdd EventActivityCode = "dns.setting.disabled.management.group.add" - EventActivityCodeDnsSettingDisabledManagementGroupDelete EventActivityCode = "dns.setting.disabled.management.group.delete" - EventActivityCodeGroupAdd EventActivityCode = "group.add" - EventActivityCodeGroupUpdate EventActivityCode = "group.update" - EventActivityCodeNameserverGroupAdd EventActivityCode = "nameserver.group.add" - EventActivityCodeNameserverGroupDelete EventActivityCode = "nameserver.group.delete" - EventActivityCodeNameserverGroupUpdate EventActivityCode = "nameserver.group.update" - EventActivityCodePeerLoginExpirationDisable EventActivityCode = "peer.login.expiration.disable" - EventActivityCodePeerLoginExpirationEnable EventActivityCode = "peer.login.expiration.enable" - EventActivityCodePeerLoginExpire EventActivityCode = "peer.login.expire" - EventActivityCodePeerRename EventActivityCode = "peer.rename" - EventActivityCodePeerSshDisable EventActivityCode = "peer.ssh.disable" - EventActivityCodePeerSshEnable EventActivityCode = "peer.ssh.enable" - EventActivityCodePersonalAccessTokenCreate EventActivityCode = "personal.access.token.create" - EventActivityCodePersonalAccessTokenDelete EventActivityCode = "personal.access.token.delete" - EventActivityCodePolicyAdd EventActivityCode = "policy.add" - EventActivityCodePolicyDelete EventActivityCode = "policy.delete" - EventActivityCodePolicyUpdate EventActivityCode = "policy.update" - EventActivityCodeRouteAdd EventActivityCode = "route.add" - EventActivityCodeRouteDelete EventActivityCode = "route.delete" - EventActivityCodeRouteUpdate EventActivityCode = "route.update" - EventActivityCodeRuleAdd EventActivityCode = "rule.add" - EventActivityCodeRuleDelete EventActivityCode = "rule.delete" - EventActivityCodeRuleUpdate EventActivityCode = "rule.update" - EventActivityCodeServiceCreate EventActivityCode = "service.create" - EventActivityCodeServiceDelete EventActivityCode = "service.delete" - EventActivityCodeServiceUpdate EventActivityCode = "service.update" - EventActivityCodeServiceUserCreate EventActivityCode = "service.user.create" - EventActivityCodeServiceUserDelete EventActivityCode = "service.user.delete" - EventActivityCodeSetupkeyAdd EventActivityCode = "setupkey.add" - EventActivityCodeSetupkeyGroupAdd EventActivityCode = "setupkey.group.add" - EventActivityCodeSetupkeyGroupDelete EventActivityCode = "setupkey.group.delete" - EventActivityCodeSetupkeyOveruse EventActivityCode = "setupkey.overuse" - EventActivityCodeSetupkeyPeerAdd EventActivityCode = "setupkey.peer.add" - EventActivityCodeSetupkeyRevoke EventActivityCode = "setupkey.revoke" - EventActivityCodeSetupkeyUpdate EventActivityCode = "setupkey.update" - EventActivityCodeUserBlock EventActivityCode = "user.block" - EventActivityCodeUserGroupAdd EventActivityCode = "user.group.add" - EventActivityCodeUserGroupDelete EventActivityCode = "user.group.delete" - EventActivityCodeUserInvite EventActivityCode = "user.invite" - EventActivityCodeUserJoin EventActivityCode = "user.join" - EventActivityCodeUserPeerAdd EventActivityCode = "user.peer.add" - EventActivityCodeUserPeerDelete EventActivityCode = "user.peer.delete" - EventActivityCodeUserPeerLogin EventActivityCode = "user.peer.login" - EventActivityCodeUserRoleUpdate EventActivityCode = "user.role.update" - EventActivityCodeUserUnblock EventActivityCode = "user.unblock" + EventActivityCodeAccountCreate EventActivityCode = "account.create" + EventActivityCodeAccountDelete EventActivityCode = "account.delete" + EventActivityCodeAccountDnsDomainUpdate EventActivityCode = "account.dns.domain.update" + EventActivityCodeAccountNetworkRangeUpdate EventActivityCode = "account.network.range.update" + EventActivityCodeAccountPeerInactivityExpirationDisable EventActivityCode = "account.peer.inactivity.expiration.disable" + EventActivityCodeAccountPeerInactivityExpirationEnable EventActivityCode = "account.peer.inactivity.expiration.enable" + EventActivityCodeAccountPeerInactivityExpirationUpdate EventActivityCode = "account.peer.inactivity.expiration.update" + EventActivityCodeAccountSettingGroupPropagationDisable EventActivityCode = "account.setting.group.propagation.disable" + EventActivityCodeAccountSettingGroupPropagationEnable EventActivityCode = "account.setting.group.propagation.enable" + EventActivityCodeAccountSettingLazyConnectionDisable EventActivityCode = "account.setting.lazy.connection.disable" + EventActivityCodeAccountSettingLazyConnectionEnable EventActivityCode = "account.setting.lazy.connection.enable" + EventActivityCodeAccountSettingPeerApprovalDisable EventActivityCode = "account.setting.peer.approval.disable" + EventActivityCodeAccountSettingPeerApprovalEnable EventActivityCode = "account.setting.peer.approval.enable" + EventActivityCodeAccountSettingPeerLoginExpirationDisable EventActivityCode = "account.setting.peer.login.expiration.disable" + EventActivityCodeAccountSettingPeerLoginExpirationEnable EventActivityCode = "account.setting.peer.login.expiration.enable" + EventActivityCodeAccountSettingPeerLoginExpirationUpdate EventActivityCode = "account.setting.peer.login.expiration.update" + EventActivityCodeAccountSettingRoutingPeerDnsResolutionDisable EventActivityCode = "account.setting.routing.peer.dns.resolution.disable" + EventActivityCodeAccountSettingRoutingPeerDnsResolutionEnable EventActivityCode = "account.setting.routing.peer.dns.resolution.enable" + EventActivityCodeAccountSettingsAutoVersionUpdate EventActivityCode = "account.settings.auto.version.update" + EventActivityCodeDashboardLogin EventActivityCode = "dashboard.login" + EventActivityCodeDnsSettingDisabledManagementGroupAdd EventActivityCode = "dns.setting.disabled.management.group.add" + EventActivityCodeDnsSettingDisabledManagementGroupDelete EventActivityCode = "dns.setting.disabled.management.group.delete" + EventActivityCodeDnsZoneCreate EventActivityCode = "dns.zone.create" + EventActivityCodeDnsZoneDelete EventActivityCode = "dns.zone.delete" + EventActivityCodeDnsZoneRecordCreate EventActivityCode = "dns.zone.record.create" + EventActivityCodeDnsZoneRecordDelete EventActivityCode = "dns.zone.record.delete" + EventActivityCodeDnsZoneRecordUpdate EventActivityCode = "dns.zone.record.update" + EventActivityCodeDnsZoneUpdate EventActivityCode = "dns.zone.update" + EventActivityCodeGroupAdd EventActivityCode = "group.add" + EventActivityCodeGroupDelete EventActivityCode = "group.delete" + EventActivityCodeGroupUpdate EventActivityCode = "group.update" + EventActivityCodeIdentityproviderCreate EventActivityCode = "identityprovider.create" + EventActivityCodeIdentityproviderDelete EventActivityCode = "identityprovider.delete" + EventActivityCodeIdentityproviderUpdate EventActivityCode = "identityprovider.update" + EventActivityCodeIntegrationCreate EventActivityCode = "integration.create" + EventActivityCodeIntegrationDelete EventActivityCode = "integration.delete" + EventActivityCodeIntegrationUpdate EventActivityCode = "integration.update" + EventActivityCodeNameserverGroupAdd EventActivityCode = "nameserver.group.add" + EventActivityCodeNameserverGroupDelete EventActivityCode = "nameserver.group.delete" + EventActivityCodeNameserverGroupUpdate EventActivityCode = "nameserver.group.update" + EventActivityCodeNetworkCreate EventActivityCode = "network.create" + EventActivityCodeNetworkDelete EventActivityCode = "network.delete" + EventActivityCodeNetworkResourceCreate EventActivityCode = "network.resource.create" + EventActivityCodeNetworkResourceDelete EventActivityCode = "network.resource.delete" + EventActivityCodeNetworkResourceUpdate EventActivityCode = "network.resource.update" + EventActivityCodeNetworkRouterCreate EventActivityCode = "network.router.create" + EventActivityCodeNetworkRouterDelete EventActivityCode = "network.router.delete" + EventActivityCodeNetworkRouterUpdate EventActivityCode = "network.router.update" + EventActivityCodeNetworkUpdate EventActivityCode = "network.update" + EventActivityCodePeerApprovalRevoke EventActivityCode = "peer.approval.revoke" + EventActivityCodePeerApprove EventActivityCode = "peer.approve" + EventActivityCodePeerGroupAdd EventActivityCode = "peer.group.add" + EventActivityCodePeerGroupDelete EventActivityCode = "peer.group.delete" + EventActivityCodePeerInactivityExpirationDisable EventActivityCode = "peer.inactivity.expiration.disable" + EventActivityCodePeerInactivityExpirationEnable EventActivityCode = "peer.inactivity.expiration.enable" + EventActivityCodePeerIpUpdate EventActivityCode = "peer.ip.update" + EventActivityCodePeerJobCreate EventActivityCode = "peer.job.create" + EventActivityCodePeerLoginExpirationDisable EventActivityCode = "peer.login.expiration.disable" + EventActivityCodePeerLoginExpirationEnable EventActivityCode = "peer.login.expiration.enable" + EventActivityCodePeerLoginExpire EventActivityCode = "peer.login.expire" + EventActivityCodePeerRename EventActivityCode = "peer.rename" + EventActivityCodePeerSetupkeyAdd EventActivityCode = "peer.setupkey.add" + EventActivityCodePeerSshDisable EventActivityCode = "peer.ssh.disable" + EventActivityCodePeerSshEnable EventActivityCode = "peer.ssh.enable" + EventActivityCodePeerUserAdd EventActivityCode = "peer.user.add" + EventActivityCodePersonalAccessTokenCreate EventActivityCode = "personal.access.token.create" + EventActivityCodePersonalAccessTokenDelete EventActivityCode = "personal.access.token.delete" + EventActivityCodePolicyAdd EventActivityCode = "policy.add" + EventActivityCodePolicyDelete EventActivityCode = "policy.delete" + EventActivityCodePolicyUpdate EventActivityCode = "policy.update" + EventActivityCodePostureCheckCreate EventActivityCode = "posture.check.create" + EventActivityCodePostureCheckDelete EventActivityCode = "posture.check.delete" + EventActivityCodePostureCheckUpdate EventActivityCode = "posture.check.update" + EventActivityCodeResourceGroupAdd EventActivityCode = "resource.group.add" + EventActivityCodeResourceGroupDelete EventActivityCode = "resource.group.delete" + EventActivityCodeRouteAdd EventActivityCode = "route.add" + EventActivityCodeRouteDelete EventActivityCode = "route.delete" + EventActivityCodeRouteUpdate EventActivityCode = "route.update" + EventActivityCodeRuleAdd EventActivityCode = "rule.add" + EventActivityCodeRuleDelete EventActivityCode = "rule.delete" + EventActivityCodeRuleUpdate EventActivityCode = "rule.update" + EventActivityCodeServiceUserCreate EventActivityCode = "service.user.create" + EventActivityCodeServiceUserDelete EventActivityCode = "service.user.delete" + EventActivityCodeSetupkeyAdd EventActivityCode = "setupkey.add" + EventActivityCodeSetupkeyDelete EventActivityCode = "setupkey.delete" + EventActivityCodeSetupkeyGroupAdd EventActivityCode = "setupkey.group.add" + EventActivityCodeSetupkeyGroupDelete EventActivityCode = "setupkey.group.delete" + EventActivityCodeSetupkeyOveruse EventActivityCode = "setupkey.overuse" + EventActivityCodeSetupkeyRevoke EventActivityCode = "setupkey.revoke" + EventActivityCodeSetupkeyUpdate EventActivityCode = "setupkey.update" + EventActivityCodeTransferredOwnerRole EventActivityCode = "transferred.owner.role" + EventActivityCodeUserApprove EventActivityCode = "user.approve" + EventActivityCodeUserBlock EventActivityCode = "user.block" + EventActivityCodeUserCreate EventActivityCode = "user.create" + EventActivityCodeUserDelete EventActivityCode = "user.delete" + EventActivityCodeUserGroupAdd EventActivityCode = "user.group.add" + EventActivityCodeUserGroupDelete EventActivityCode = "user.group.delete" + EventActivityCodeUserInvite EventActivityCode = "user.invite" + EventActivityCodeUserInviteLinkAccept EventActivityCode = "user.invite.link.accept" + EventActivityCodeUserInviteLinkCreate EventActivityCode = "user.invite.link.create" + EventActivityCodeUserInviteLinkDelete EventActivityCode = "user.invite.link.delete" + EventActivityCodeUserInviteLinkRegenerate EventActivityCode = "user.invite.link.regenerate" + EventActivityCodeUserJoin EventActivityCode = "user.join" + EventActivityCodeUserPasswordChange EventActivityCode = "user.password.change" + EventActivityCodeUserPeerDelete EventActivityCode = "user.peer.delete" + EventActivityCodeUserPeerLogin EventActivityCode = "user.peer.login" + EventActivityCodeUserReject EventActivityCode = "user.reject" + EventActivityCodeUserRoleUpdate EventActivityCode = "user.role.update" + EventActivityCodeUserUnblock EventActivityCode = "user.unblock" ) // Defines values for GeoLocationCheckAction. @@ -125,6 +188,13 @@ const ( IngressPortAllocationRequestPortRangeProtocolUdp IngressPortAllocationRequestPortRangeProtocol = "udp" ) +// Defines values for JobResponseStatus. +const ( + JobResponseStatusFailed JobResponseStatus = "failed" + JobResponseStatusPending JobResponseStatus = "pending" + JobResponseStatusSucceeded JobResponseStatus = "succeeded" +) + // Defines values for NameserverNsType. const ( NameserverNsTypeUdp NameserverNsType = "udp" @@ -196,26 +266,6 @@ const ( ResourceTypeSubnet ResourceType = "subnet" ) -// Defines values for ReverseProxyAuthConfigType. -const ( - ReverseProxyAuthConfigTypeBearer ReverseProxyAuthConfigType = "bearer" - ReverseProxyAuthConfigTypeLink ReverseProxyAuthConfigType = "link" - ReverseProxyAuthConfigTypePassword ReverseProxyAuthConfigType = "password" - ReverseProxyAuthConfigTypePin ReverseProxyAuthConfigType = "pin" -) - -// Defines values for ReverseProxyTargetProtocol. -const ( - ReverseProxyTargetProtocolHttp ReverseProxyTargetProtocol = "http" - ReverseProxyTargetProtocolHttps ReverseProxyTargetProtocol = "https" -) - -// Defines values for ReverseProxyTargetTargetType. -const ( - ReverseProxyTargetTargetTypePeer ReverseProxyTargetTargetType = "peer" - ReverseProxyTargetTargetTypeResource ReverseProxyTargetTargetType = "resource" -) - // Defines values for UserStatus. const ( UserStatusActive UserStatus = "active" @@ -223,6 +273,11 @@ const ( UserStatusInvited UserStatus = "invited" ) +// Defines values for WorkloadType. +const ( + WorkloadTypeBundle WorkloadType = "bundle" +) + // Defines values for GetApiEventsNetworkTrafficParamsType. const ( GetApiEventsNetworkTrafficParamsTypeTYPEDROP GetApiEventsNetworkTrafficParamsType = "TYPE_DROP" @@ -391,13 +446,45 @@ type AvailablePorts struct { Udp int `json:"udp"` } -// BearerAuthConfig defines model for BearerAuthConfig. -type BearerAuthConfig struct { - // DistributionGroups List of group IDs that can use bearer auth - DistributionGroups *[]string `json:"distribution_groups,omitempty"` +// BundleParameters These parameters control what gets included in the bundle and how it is processed. +type BundleParameters struct { + // Anonymize Whether sensitive data should be anonymized in the bundle. + Anonymize bool `json:"anonymize"` - // Enabled Whether bearer auth is enabled - Enabled bool `json:"enabled"` + // BundleFor Whether to generate a bundle for the given timeframe. + BundleFor bool `json:"bundle_for"` + + // BundleForTime Time period in minutes for which to generate the bundle. + BundleForTime int `json:"bundle_for_time"` + + // LogFileCount Maximum number of log files to include in the bundle. + LogFileCount int `json:"log_file_count"` +} + +// BundleResult defines model for BundleResult. +type BundleResult struct { + UploadKey *string `json:"upload_key"` +} + +// BundleWorkloadRequest defines model for BundleWorkloadRequest. +type BundleWorkloadRequest struct { + // Parameters These parameters control what gets included in the bundle and how it is processed. + Parameters BundleParameters `json:"parameters"` + + // Type Identifies the type of workload the job will execute. + // Currently only `"bundle"` is supported. + Type WorkloadType `json:"type"` +} + +// BundleWorkloadResponse defines model for BundleWorkloadResponse. +type BundleWorkloadResponse struct { + // Parameters These parameters control what gets included in the bundle and how it is processed. + Parameters BundleParameters `json:"parameters"` + Result BundleResult `json:"result"` + + // Type Identifies the type of workload the job will execute. + // Currently only `"bundle"` is supported. + Type WorkloadType `json:"type"` } // Checks List of objects that perform the actual checks @@ -787,12 +874,40 @@ type InstanceStatus struct { SetupRequired bool `json:"setup_required"` } -// LinkAuthConfig defines model for LinkAuthConfig. -type LinkAuthConfig struct { - // Enabled Whether link auth is enabled - Enabled bool `json:"enabled"` +// InstanceVersionInfo Version information for NetBird components +type InstanceVersionInfo struct { + // DashboardAvailableVersion The latest available version of the dashboard (from GitHub releases) + DashboardAvailableVersion *string `json:"dashboard_available_version,omitempty"` + + // ManagementAvailableVersion The latest available version of the management server (from GitHub releases) + ManagementAvailableVersion *string `json:"management_available_version,omitempty"` + + // ManagementCurrentVersion The current running version of the management server + ManagementCurrentVersion string `json:"management_current_version"` + + // ManagementUpdateAvailable Indicates if a newer management version is available + ManagementUpdateAvailable bool `json:"management_update_available"` } +// JobRequest defines model for JobRequest. +type JobRequest struct { + Workload WorkloadRequest `json:"workload"` +} + +// JobResponse defines model for JobResponse. +type JobResponse struct { + CompletedAt *time.Time `json:"completed_at"` + CreatedAt time.Time `json:"created_at"` + FailedReason *string `json:"failed_reason"` + Id string `json:"id"` + Status JobResponseStatus `json:"status"` + TriggeredBy string `json:"triggered_by"` + Workload WorkloadResponse `json:"workload"` +} + +// JobResponseStatus defines model for JobResponse.Status. +type JobResponseStatus string + // Location Describe geographical location information type Location struct { // CityName Commonly used English name of the city @@ -1163,22 +1278,13 @@ type OSVersionCheck struct { Windows *MinKernelVersionCheck `json:"windows,omitempty"` } -// PINAuthConfig defines model for PINAuthConfig. -type PINAuthConfig struct { - // Enabled Whether PIN auth is enabled - Enabled bool `json:"enabled"` +// PasswordChangeRequest defines model for PasswordChangeRequest. +type PasswordChangeRequest struct { + // NewPassword The new password to set + NewPassword string `json:"new_password"` - // Pin PIN value - Pin string `json:"pin"` -} - -// PasswordAuthConfig defines model for PasswordAuthConfig. -type PasswordAuthConfig struct { - // Enabled Whether password auth is enabled - Enabled bool `json:"enabled"` - - // Password Auth password - Password string `json:"password"` + // OldPassword The current password + OldPassword string `json:"old_password"` } // Peer defines model for Peer. @@ -1742,87 +1848,6 @@ type Resource struct { // ResourceType defines model for ResourceType. type ResourceType string -// ReverseProxy defines model for ReverseProxy. -type ReverseProxy struct { - Auth ReverseProxyAuthConfig `json:"auth"` - - // Domain Domain for the reverse proxy - Domain string `json:"domain"` - - // Enabled Whether the reverse proxy is enabled - Enabled bool `json:"enabled"` - - // Id Reverse proxy ID - Id string `json:"id"` - - // Name Reverse proxy name - Name string `json:"name"` - - // Targets List of target backends for this reverse proxy - Targets []ReverseProxyTarget `json:"targets"` -} - -// ReverseProxyAuthConfig defines model for ReverseProxyAuthConfig. -type ReverseProxyAuthConfig struct { - BearerAuth *BearerAuthConfig `json:"bearer_auth,omitempty"` - LinkAuth *LinkAuthConfig `json:"link_auth,omitempty"` - PasswordAuth *PasswordAuthConfig `json:"password_auth,omitempty"` - PinAuth *PINAuthConfig `json:"pin_auth,omitempty"` - - // Type Authentication type - Type ReverseProxyAuthConfigType `json:"type"` -} - -// ReverseProxyAuthConfigType Authentication type -type ReverseProxyAuthConfigType string - -// ReverseProxyRequest defines model for ReverseProxyRequest. -type ReverseProxyRequest struct { - Auth ReverseProxyAuthConfig `json:"auth"` - - // Domain Domain for the reverse proxy - Domain string `json:"domain"` - - // Enabled Whether the reverse proxy is enabled - Enabled bool `json:"enabled"` - - // Name Reverse proxy name - Name string `json:"name"` - - // Targets List of target backends for this reverse proxy - Targets []ReverseProxyTarget `json:"targets"` -} - -// ReverseProxyTarget defines model for ReverseProxyTarget. -type ReverseProxyTarget struct { - // Enabled Whether this target is enabled - Enabled bool `json:"enabled"` - - // Host Backend ip or domain for this target - Host string `json:"host"` - - // Path URL path prefix for this target - Path *string `json:"path,omitempty"` - - // Port Backend port for this target - Port int `json:"port"` - - // Protocol Protocol to use when connecting to the backend - Protocol ReverseProxyTargetProtocol `json:"protocol"` - - // TargetId Target ID - TargetId string `json:"target_id"` - - // TargetType Target type (e.g., "peer", "resource") - TargetType ReverseProxyTargetTargetType `json:"target_type"` -} - -// ReverseProxyTargetProtocol Protocol to use when connecting to the backend -type ReverseProxyTargetProtocol string - -// ReverseProxyTargetTargetType Target type (e.g., "peer", "resource") -type ReverseProxyTargetTargetType string - // Route defines model for Route. type Route struct { // AccessControlGroups Access control group identifier associated with route. @@ -2160,6 +2185,99 @@ type UserCreateRequest struct { Role string `json:"role"` } +// UserInvite A user invite +type UserInvite struct { + // AutoGroups Group IDs to auto-assign to peers registered by this user + AutoGroups []string `json:"auto_groups"` + + // CreatedAt Invite creation time + CreatedAt time.Time `json:"created_at"` + + // Email User's email address + Email string `json:"email"` + + // Expired Whether the invite has expired + Expired bool `json:"expired"` + + // ExpiresAt Invite expiration time + ExpiresAt time.Time `json:"expires_at"` + + // Id Invite ID + Id string `json:"id"` + + // InviteToken The invite link to be shared with the user. Only returned when the invite is created or regenerated. + InviteToken *string `json:"invite_token,omitempty"` + + // Name User's full name + Name string `json:"name"` + + // Role User's NetBird account role + Role string `json:"role"` +} + +// UserInviteAcceptRequest Request to accept an invite and set password +type UserInviteAcceptRequest struct { + // Password The password the user wants to set. Must be at least 8 characters long and contain at least one uppercase letter, one digit, and one special character (any character that is not a letter or digit, including spaces). + Password string `json:"password"` +} + +// UserInviteAcceptResponse Response after accepting an invite +type UserInviteAcceptResponse struct { + // Success Whether the invite was accepted successfully + Success bool `json:"success"` +} + +// UserInviteCreateRequest Request to create a user invite link +type UserInviteCreateRequest struct { + // AutoGroups Group IDs to auto-assign to peers registered by this user + AutoGroups []string `json:"auto_groups"` + + // Email User's email address + Email string `json:"email"` + + // ExpiresIn Invite expiration time in seconds (default 72 hours) + ExpiresIn *int `json:"expires_in,omitempty"` + + // Name User's full name + Name string `json:"name"` + + // Role User's NetBird account role + Role string `json:"role"` +} + +// UserInviteInfo Public information about an invite +type UserInviteInfo struct { + // Email User's email address + Email string `json:"email"` + + // ExpiresAt Invite expiration time + ExpiresAt time.Time `json:"expires_at"` + + // InvitedBy Name of the user who sent the invite + InvitedBy string `json:"invited_by"` + + // Name User's full name + Name string `json:"name"` + + // Valid Whether the invite is still valid (not expired) + Valid bool `json:"valid"` +} + +// UserInviteRegenerateRequest Request to regenerate an invite link +type UserInviteRegenerateRequest struct { + // ExpiresIn Invite expiration time in seconds (default 72 hours) + ExpiresIn *int `json:"expires_in,omitempty"` +} + +// UserInviteRegenerateResponse Response after regenerating an invite +type UserInviteRegenerateResponse struct { + // InviteExpiresAt New invite expiration time + InviteExpiresAt time.Time `json:"invite_expires_at"` + + // InviteToken The new invite token + InviteToken string `json:"invite_token"` +} + // UserPermissions defines model for UserPermissions. type UserPermissions struct { // IsRestricted Indicates whether this User's Peers view is restricted @@ -2179,6 +2297,20 @@ type UserRequest struct { Role string `json:"role"` } +// WorkloadRequest defines model for WorkloadRequest. +type WorkloadRequest struct { + union json.RawMessage +} + +// WorkloadResponse defines model for WorkloadResponse. +type WorkloadResponse struct { + union json.RawMessage +} + +// WorkloadType Identifies the type of workload the job will execute. +// Currently only `"bundle"` is supported. +type WorkloadType string + // Zone defines model for Zone. type Zone struct { // DistributionGroups Group IDs that defines groups of peers that will resolve this zone @@ -2362,6 +2494,9 @@ type PostApiPeersPeerIdIngressPortsJSONRequestBody = IngressPortAllocationReques // PutApiPeersPeerIdIngressPortsAllocationIdJSONRequestBody defines body for PutApiPeersPeerIdIngressPortsAllocationId for application/json ContentType. type PutApiPeersPeerIdIngressPortsAllocationIdJSONRequestBody = IngressPortAllocationRequest +// PostApiPeersPeerIdJobsJSONRequestBody defines body for PostApiPeersPeerIdJobs for application/json ContentType. +type PostApiPeersPeerIdJobsJSONRequestBody = JobRequest + // PostApiPeersPeerIdTemporaryAccessJSONRequestBody defines body for PostApiPeersPeerIdTemporaryAccess for application/json ContentType. type PostApiPeersPeerIdTemporaryAccessJSONRequestBody = PeerTemporaryAccessRequest @@ -2377,12 +2512,6 @@ type PostApiPostureChecksJSONRequestBody = PostureCheckUpdate // PutApiPostureChecksPostureCheckIdJSONRequestBody defines body for PutApiPostureChecksPostureCheckId for application/json ContentType. type PutApiPostureChecksPostureCheckIdJSONRequestBody = PostureCheckUpdate -// PostApiReverseProxyJSONRequestBody defines body for PostApiReverseProxy for application/json ContentType. -type PostApiReverseProxyJSONRequestBody = ReverseProxyRequest - -// PutApiReverseProxyProxyIdJSONRequestBody defines body for PutApiReverseProxyProxyId for application/json ContentType. -type PutApiReverseProxyProxyIdJSONRequestBody = ReverseProxyRequest - // PostApiRoutesJSONRequestBody defines body for PostApiRoutes for application/json ContentType. type PostApiRoutesJSONRequestBody = RouteRequest @@ -2401,8 +2530,138 @@ type PutApiSetupKeysKeyIdJSONRequestBody = SetupKeyRequest // PostApiUsersJSONRequestBody defines body for PostApiUsers for application/json ContentType. type PostApiUsersJSONRequestBody = UserCreateRequest +// PostApiUsersInvitesJSONRequestBody defines body for PostApiUsersInvites for application/json ContentType. +type PostApiUsersInvitesJSONRequestBody = UserInviteCreateRequest + +// PostApiUsersInvitesInviteIdRegenerateJSONRequestBody defines body for PostApiUsersInvitesInviteIdRegenerate for application/json ContentType. +type PostApiUsersInvitesInviteIdRegenerateJSONRequestBody = UserInviteRegenerateRequest + +// PostApiUsersInvitesTokenAcceptJSONRequestBody defines body for PostApiUsersInvitesTokenAccept for application/json ContentType. +type PostApiUsersInvitesTokenAcceptJSONRequestBody = UserInviteAcceptRequest + // PutApiUsersUserIdJSONRequestBody defines body for PutApiUsersUserId for application/json ContentType. type PutApiUsersUserIdJSONRequestBody = UserRequest +// PutApiUsersUserIdPasswordJSONRequestBody defines body for PutApiUsersUserIdPassword for application/json ContentType. +type PutApiUsersUserIdPasswordJSONRequestBody = PasswordChangeRequest + // PostApiUsersUserIdTokensJSONRequestBody defines body for PostApiUsersUserIdTokens for application/json ContentType. type PostApiUsersUserIdTokensJSONRequestBody = PersonalAccessTokenRequest + +// AsBundleWorkloadRequest returns the union data inside the WorkloadRequest as a BundleWorkloadRequest +func (t WorkloadRequest) AsBundleWorkloadRequest() (BundleWorkloadRequest, error) { + var body BundleWorkloadRequest + err := json.Unmarshal(t.union, &body) + return body, err +} + +// FromBundleWorkloadRequest overwrites any union data inside the WorkloadRequest as the provided BundleWorkloadRequest +func (t *WorkloadRequest) FromBundleWorkloadRequest(v BundleWorkloadRequest) error { + v.Type = "bundle" + b, err := json.Marshal(v) + t.union = b + return err +} + +// MergeBundleWorkloadRequest performs a merge with any union data inside the WorkloadRequest, using the provided BundleWorkloadRequest +func (t *WorkloadRequest) MergeBundleWorkloadRequest(v BundleWorkloadRequest) error { + v.Type = "bundle" + b, err := json.Marshal(v) + if err != nil { + return err + } + + merged, err := runtime.JSONMerge(t.union, b) + t.union = merged + return err +} + +func (t WorkloadRequest) Discriminator() (string, error) { + var discriminator struct { + Discriminator string `json:"type"` + } + err := json.Unmarshal(t.union, &discriminator) + return discriminator.Discriminator, err +} + +func (t WorkloadRequest) ValueByDiscriminator() (interface{}, error) { + discriminator, err := t.Discriminator() + if err != nil { + return nil, err + } + switch discriminator { + case "bundle": + return t.AsBundleWorkloadRequest() + default: + return nil, errors.New("unknown discriminator value: " + discriminator) + } +} + +func (t WorkloadRequest) MarshalJSON() ([]byte, error) { + b, err := t.union.MarshalJSON() + return b, err +} + +func (t *WorkloadRequest) UnmarshalJSON(b []byte) error { + err := t.union.UnmarshalJSON(b) + return err +} + +// AsBundleWorkloadResponse returns the union data inside the WorkloadResponse as a BundleWorkloadResponse +func (t WorkloadResponse) AsBundleWorkloadResponse() (BundleWorkloadResponse, error) { + var body BundleWorkloadResponse + err := json.Unmarshal(t.union, &body) + return body, err +} + +// FromBundleWorkloadResponse overwrites any union data inside the WorkloadResponse as the provided BundleWorkloadResponse +func (t *WorkloadResponse) FromBundleWorkloadResponse(v BundleWorkloadResponse) error { + v.Type = "bundle" + b, err := json.Marshal(v) + t.union = b + return err +} + +// MergeBundleWorkloadResponse performs a merge with any union data inside the WorkloadResponse, using the provided BundleWorkloadResponse +func (t *WorkloadResponse) MergeBundleWorkloadResponse(v BundleWorkloadResponse) error { + v.Type = "bundle" + b, err := json.Marshal(v) + if err != nil { + return err + } + + merged, err := runtime.JSONMerge(t.union, b) + t.union = merged + return err +} + +func (t WorkloadResponse) Discriminator() (string, error) { + var discriminator struct { + Discriminator string `json:"type"` + } + err := json.Unmarshal(t.union, &discriminator) + return discriminator.Discriminator, err +} + +func (t WorkloadResponse) ValueByDiscriminator() (interface{}, error) { + discriminator, err := t.Discriminator() + if err != nil { + return nil, err + } + switch discriminator { + case "bundle": + return t.AsBundleWorkloadResponse() + default: + return nil, errors.New("unknown discriminator value: " + discriminator) + } +} + +func (t WorkloadResponse) MarshalJSON() ([]byte, error) { + b, err := t.union.MarshalJSON() + return b, err +} + +func (t *WorkloadResponse) UnmarshalJSON(b []byte) error { + err := t.union.UnmarshalJSON(b) + return err +} diff --git a/shared/management/proto/management.pb.go b/shared/management/proto/management.pb.go index dd5c9e1fc..dfa9adaf6 100644 --- a/shared/management/proto/management.pb.go +++ b/shared/management/proto/management.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v6.33.0 +// protoc v6.33.1 // source: management.proto package proto @@ -22,6 +22,55 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +type JobStatus int32 + +const ( + JobStatus_unknown_status JobStatus = 0 //placeholder + JobStatus_succeeded JobStatus = 1 + JobStatus_failed JobStatus = 2 +) + +// Enum value maps for JobStatus. +var ( + JobStatus_name = map[int32]string{ + 0: "unknown_status", + 1: "succeeded", + 2: "failed", + } + JobStatus_value = map[string]int32{ + "unknown_status": 0, + "succeeded": 1, + "failed": 2, + } +) + +func (x JobStatus) Enum() *JobStatus { + p := new(JobStatus) + *p = x + return p +} + +func (x JobStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (JobStatus) Descriptor() protoreflect.EnumDescriptor { + return file_management_proto_enumTypes[0].Descriptor() +} + +func (JobStatus) Type() protoreflect.EnumType { + return &file_management_proto_enumTypes[0] +} + +func (x JobStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use JobStatus.Descriptor instead. +func (JobStatus) EnumDescriptor() ([]byte, []int) { + return file_management_proto_rawDescGZIP(), []int{0} +} + type RuleProtocol int32 const ( @@ -64,11 +113,11 @@ func (x RuleProtocol) String() string { } func (RuleProtocol) Descriptor() protoreflect.EnumDescriptor { - return file_management_proto_enumTypes[0].Descriptor() + return file_management_proto_enumTypes[1].Descriptor() } func (RuleProtocol) Type() protoreflect.EnumType { - return &file_management_proto_enumTypes[0] + return &file_management_proto_enumTypes[1] } func (x RuleProtocol) Number() protoreflect.EnumNumber { @@ -77,7 +126,7 @@ func (x RuleProtocol) Number() protoreflect.EnumNumber { // Deprecated: Use RuleProtocol.Descriptor instead. func (RuleProtocol) EnumDescriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{0} + return file_management_proto_rawDescGZIP(), []int{1} } type RuleDirection int32 @@ -110,11 +159,11 @@ func (x RuleDirection) String() string { } func (RuleDirection) Descriptor() protoreflect.EnumDescriptor { - return file_management_proto_enumTypes[1].Descriptor() + return file_management_proto_enumTypes[2].Descriptor() } func (RuleDirection) Type() protoreflect.EnumType { - return &file_management_proto_enumTypes[1] + return &file_management_proto_enumTypes[2] } func (x RuleDirection) Number() protoreflect.EnumNumber { @@ -123,7 +172,7 @@ func (x RuleDirection) Number() protoreflect.EnumNumber { // Deprecated: Use RuleDirection.Descriptor instead. func (RuleDirection) EnumDescriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{1} + return file_management_proto_rawDescGZIP(), []int{2} } type RuleAction int32 @@ -156,11 +205,11 @@ func (x RuleAction) String() string { } func (RuleAction) Descriptor() protoreflect.EnumDescriptor { - return file_management_proto_enumTypes[2].Descriptor() + return file_management_proto_enumTypes[3].Descriptor() } func (RuleAction) Type() protoreflect.EnumType { - return &file_management_proto_enumTypes[2] + return &file_management_proto_enumTypes[3] } func (x RuleAction) Number() protoreflect.EnumNumber { @@ -169,7 +218,7 @@ func (x RuleAction) Number() protoreflect.EnumNumber { // Deprecated: Use RuleAction.Descriptor instead. func (RuleAction) EnumDescriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{2} + return file_management_proto_rawDescGZIP(), []int{3} } type HostConfig_Protocol int32 @@ -211,11 +260,11 @@ func (x HostConfig_Protocol) String() string { } func (HostConfig_Protocol) Descriptor() protoreflect.EnumDescriptor { - return file_management_proto_enumTypes[3].Descriptor() + return file_management_proto_enumTypes[4].Descriptor() } func (HostConfig_Protocol) Type() protoreflect.EnumType { - return &file_management_proto_enumTypes[3] + return &file_management_proto_enumTypes[4] } func (x HostConfig_Protocol) Number() protoreflect.EnumNumber { @@ -224,7 +273,7 @@ func (x HostConfig_Protocol) Number() protoreflect.EnumNumber { // Deprecated: Use HostConfig_Protocol.Descriptor instead. func (HostConfig_Protocol) EnumDescriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{14, 0} + return file_management_proto_rawDescGZIP(), []int{18, 0} } type DeviceAuthorizationFlowProvider int32 @@ -254,11 +303,11 @@ func (x DeviceAuthorizationFlowProvider) String() string { } func (DeviceAuthorizationFlowProvider) Descriptor() protoreflect.EnumDescriptor { - return file_management_proto_enumTypes[4].Descriptor() + return file_management_proto_enumTypes[5].Descriptor() } func (DeviceAuthorizationFlowProvider) Type() protoreflect.EnumType { - return &file_management_proto_enumTypes[4] + return &file_management_proto_enumTypes[5] } func (x DeviceAuthorizationFlowProvider) Number() protoreflect.EnumNumber { @@ -267,7 +316,7 @@ func (x DeviceAuthorizationFlowProvider) Number() protoreflect.EnumNumber { // Deprecated: Use DeviceAuthorizationFlowProvider.Descriptor instead. func (DeviceAuthorizationFlowProvider) EnumDescriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{27, 0} + return file_management_proto_rawDescGZIP(), []int{31, 0} } type EncryptedMessage struct { @@ -336,6 +385,290 @@ func (x *EncryptedMessage) GetVersion() int32 { return 0 } +type JobRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID []byte `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + // Types that are assignable to WorkloadParameters: + // + // *JobRequest_Bundle + WorkloadParameters isJobRequest_WorkloadParameters `protobuf_oneof:"workload_parameters"` +} + +func (x *JobRequest) Reset() { + *x = JobRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_management_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *JobRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JobRequest) ProtoMessage() {} + +func (x *JobRequest) ProtoReflect() protoreflect.Message { + mi := &file_management_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use JobRequest.ProtoReflect.Descriptor instead. +func (*JobRequest) Descriptor() ([]byte, []int) { + return file_management_proto_rawDescGZIP(), []int{1} +} + +func (x *JobRequest) GetID() []byte { + if x != nil { + return x.ID + } + return nil +} + +func (m *JobRequest) GetWorkloadParameters() isJobRequest_WorkloadParameters { + if m != nil { + return m.WorkloadParameters + } + return nil +} + +func (x *JobRequest) GetBundle() *BundleParameters { + if x, ok := x.GetWorkloadParameters().(*JobRequest_Bundle); ok { + return x.Bundle + } + return nil +} + +type isJobRequest_WorkloadParameters interface { + isJobRequest_WorkloadParameters() +} + +type JobRequest_Bundle struct { + Bundle *BundleParameters `protobuf:"bytes,10,opt,name=bundle,proto3,oneof"` //OtherParameters other = 11; +} + +func (*JobRequest_Bundle) isJobRequest_WorkloadParameters() {} + +type JobResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID []byte `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + Status JobStatus `protobuf:"varint,2,opt,name=status,proto3,enum=management.JobStatus" json:"status,omitempty"` + Reason []byte `protobuf:"bytes,3,opt,name=Reason,proto3" json:"Reason,omitempty"` + // Types that are assignable to WorkloadResults: + // + // *JobResponse_Bundle + WorkloadResults isJobResponse_WorkloadResults `protobuf_oneof:"workload_results"` +} + +func (x *JobResponse) Reset() { + *x = JobResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_management_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *JobResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JobResponse) ProtoMessage() {} + +func (x *JobResponse) ProtoReflect() protoreflect.Message { + mi := &file_management_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use JobResponse.ProtoReflect.Descriptor instead. +func (*JobResponse) Descriptor() ([]byte, []int) { + return file_management_proto_rawDescGZIP(), []int{2} +} + +func (x *JobResponse) GetID() []byte { + if x != nil { + return x.ID + } + return nil +} + +func (x *JobResponse) GetStatus() JobStatus { + if x != nil { + return x.Status + } + return JobStatus_unknown_status +} + +func (x *JobResponse) GetReason() []byte { + if x != nil { + return x.Reason + } + return nil +} + +func (m *JobResponse) GetWorkloadResults() isJobResponse_WorkloadResults { + if m != nil { + return m.WorkloadResults + } + return nil +} + +func (x *JobResponse) GetBundle() *BundleResult { + if x, ok := x.GetWorkloadResults().(*JobResponse_Bundle); ok { + return x.Bundle + } + return nil +} + +type isJobResponse_WorkloadResults interface { + isJobResponse_WorkloadResults() +} + +type JobResponse_Bundle struct { + Bundle *BundleResult `protobuf:"bytes,10,opt,name=bundle,proto3,oneof"` //OtherResult other = 11; +} + +func (*JobResponse_Bundle) isJobResponse_WorkloadResults() {} + +type BundleParameters struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BundleFor bool `protobuf:"varint,1,opt,name=bundle_for,json=bundleFor,proto3" json:"bundle_for,omitempty"` + BundleForTime int64 `protobuf:"varint,2,opt,name=bundle_for_time,json=bundleForTime,proto3" json:"bundle_for_time,omitempty"` + LogFileCount int32 `protobuf:"varint,3,opt,name=log_file_count,json=logFileCount,proto3" json:"log_file_count,omitempty"` + Anonymize bool `protobuf:"varint,4,opt,name=anonymize,proto3" json:"anonymize,omitempty"` +} + +func (x *BundleParameters) Reset() { + *x = BundleParameters{} + if protoimpl.UnsafeEnabled { + mi := &file_management_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BundleParameters) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BundleParameters) ProtoMessage() {} + +func (x *BundleParameters) ProtoReflect() protoreflect.Message { + mi := &file_management_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BundleParameters.ProtoReflect.Descriptor instead. +func (*BundleParameters) Descriptor() ([]byte, []int) { + return file_management_proto_rawDescGZIP(), []int{3} +} + +func (x *BundleParameters) GetBundleFor() bool { + if x != nil { + return x.BundleFor + } + return false +} + +func (x *BundleParameters) GetBundleForTime() int64 { + if x != nil { + return x.BundleForTime + } + return 0 +} + +func (x *BundleParameters) GetLogFileCount() int32 { + if x != nil { + return x.LogFileCount + } + return 0 +} + +func (x *BundleParameters) GetAnonymize() bool { + if x != nil { + return x.Anonymize + } + return false +} + +type BundleResult struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + UploadKey string `protobuf:"bytes,1,opt,name=upload_key,json=uploadKey,proto3" json:"upload_key,omitempty"` +} + +func (x *BundleResult) Reset() { + *x = BundleResult{} + if protoimpl.UnsafeEnabled { + mi := &file_management_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BundleResult) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BundleResult) ProtoMessage() {} + +func (x *BundleResult) ProtoReflect() protoreflect.Message { + mi := &file_management_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BundleResult.ProtoReflect.Descriptor instead. +func (*BundleResult) Descriptor() ([]byte, []int) { + return file_management_proto_rawDescGZIP(), []int{4} +} + +func (x *BundleResult) GetUploadKey() string { + if x != nil { + return x.UploadKey + } + return "" +} + type SyncRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -348,7 +681,7 @@ type SyncRequest struct { func (x *SyncRequest) Reset() { *x = SyncRequest{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[1] + mi := &file_management_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -361,7 +694,7 @@ func (x *SyncRequest) String() string { func (*SyncRequest) ProtoMessage() {} func (x *SyncRequest) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[1] + mi := &file_management_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -374,7 +707,7 @@ func (x *SyncRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SyncRequest.ProtoReflect.Descriptor instead. func (*SyncRequest) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{1} + return file_management_proto_rawDescGZIP(), []int{5} } func (x *SyncRequest) GetMeta() *PeerSystemMeta { @@ -407,7 +740,7 @@ type SyncResponse struct { func (x *SyncResponse) Reset() { *x = SyncResponse{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[2] + mi := &file_management_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -420,7 +753,7 @@ func (x *SyncResponse) String() string { func (*SyncResponse) ProtoMessage() {} func (x *SyncResponse) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[2] + mi := &file_management_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -433,7 +766,7 @@ func (x *SyncResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use SyncResponse.ProtoReflect.Descriptor instead. func (*SyncResponse) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{2} + return file_management_proto_rawDescGZIP(), []int{6} } func (x *SyncResponse) GetNetbirdConfig() *NetbirdConfig { @@ -490,7 +823,7 @@ type SyncMetaRequest struct { func (x *SyncMetaRequest) Reset() { *x = SyncMetaRequest{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[3] + mi := &file_management_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -503,7 +836,7 @@ func (x *SyncMetaRequest) String() string { func (*SyncMetaRequest) ProtoMessage() {} func (x *SyncMetaRequest) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[3] + mi := &file_management_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -516,7 +849,7 @@ func (x *SyncMetaRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SyncMetaRequest.ProtoReflect.Descriptor instead. func (*SyncMetaRequest) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{3} + return file_management_proto_rawDescGZIP(), []int{7} } func (x *SyncMetaRequest) GetMeta() *PeerSystemMeta { @@ -545,7 +878,7 @@ type LoginRequest struct { func (x *LoginRequest) Reset() { *x = LoginRequest{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[4] + mi := &file_management_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -558,7 +891,7 @@ func (x *LoginRequest) String() string { func (*LoginRequest) ProtoMessage() {} func (x *LoginRequest) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[4] + mi := &file_management_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -571,7 +904,7 @@ func (x *LoginRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use LoginRequest.ProtoReflect.Descriptor instead. func (*LoginRequest) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{4} + return file_management_proto_rawDescGZIP(), []int{8} } func (x *LoginRequest) GetSetupKey() string { @@ -625,7 +958,7 @@ type PeerKeys struct { func (x *PeerKeys) Reset() { *x = PeerKeys{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[5] + mi := &file_management_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -638,7 +971,7 @@ func (x *PeerKeys) String() string { func (*PeerKeys) ProtoMessage() {} func (x *PeerKeys) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[5] + mi := &file_management_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -651,7 +984,7 @@ func (x *PeerKeys) ProtoReflect() protoreflect.Message { // Deprecated: Use PeerKeys.ProtoReflect.Descriptor instead. func (*PeerKeys) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{5} + return file_management_proto_rawDescGZIP(), []int{9} } func (x *PeerKeys) GetSshPubKey() []byte { @@ -683,7 +1016,7 @@ type Environment struct { func (x *Environment) Reset() { *x = Environment{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[6] + mi := &file_management_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -696,7 +1029,7 @@ func (x *Environment) String() string { func (*Environment) ProtoMessage() {} func (x *Environment) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[6] + mi := &file_management_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -709,7 +1042,7 @@ func (x *Environment) ProtoReflect() protoreflect.Message { // Deprecated: Use Environment.ProtoReflect.Descriptor instead. func (*Environment) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{6} + return file_management_proto_rawDescGZIP(), []int{10} } func (x *Environment) GetCloud() string { @@ -743,7 +1076,7 @@ type File struct { func (x *File) Reset() { *x = File{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[7] + mi := &file_management_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -756,7 +1089,7 @@ func (x *File) String() string { func (*File) ProtoMessage() {} func (x *File) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[7] + mi := &file_management_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -769,7 +1102,7 @@ func (x *File) ProtoReflect() protoreflect.Message { // Deprecated: Use File.ProtoReflect.Descriptor instead. func (*File) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{7} + return file_management_proto_rawDescGZIP(), []int{11} } func (x *File) GetPath() string { @@ -818,7 +1151,7 @@ type Flags struct { func (x *Flags) Reset() { *x = Flags{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[8] + mi := &file_management_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -831,7 +1164,7 @@ func (x *Flags) String() string { func (*Flags) ProtoMessage() {} func (x *Flags) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[8] + mi := &file_management_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -844,7 +1177,7 @@ func (x *Flags) ProtoReflect() protoreflect.Message { // Deprecated: Use Flags.ProtoReflect.Descriptor instead. func (*Flags) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{8} + return file_management_proto_rawDescGZIP(), []int{12} } func (x *Flags) GetRosenpassEnabled() bool { @@ -980,7 +1313,7 @@ type PeerSystemMeta struct { func (x *PeerSystemMeta) Reset() { *x = PeerSystemMeta{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[9] + mi := &file_management_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -993,7 +1326,7 @@ func (x *PeerSystemMeta) String() string { func (*PeerSystemMeta) ProtoMessage() {} func (x *PeerSystemMeta) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[9] + mi := &file_management_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1006,7 +1339,7 @@ func (x *PeerSystemMeta) ProtoReflect() protoreflect.Message { // Deprecated: Use PeerSystemMeta.ProtoReflect.Descriptor instead. func (*PeerSystemMeta) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{9} + return file_management_proto_rawDescGZIP(), []int{13} } func (x *PeerSystemMeta) GetHostname() string { @@ -1144,7 +1477,7 @@ type LoginResponse struct { func (x *LoginResponse) Reset() { *x = LoginResponse{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[10] + mi := &file_management_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1157,7 +1490,7 @@ func (x *LoginResponse) String() string { func (*LoginResponse) ProtoMessage() {} func (x *LoginResponse) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[10] + mi := &file_management_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1170,7 +1503,7 @@ func (x *LoginResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use LoginResponse.ProtoReflect.Descriptor instead. func (*LoginResponse) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{10} + return file_management_proto_rawDescGZIP(), []int{14} } func (x *LoginResponse) GetNetbirdConfig() *NetbirdConfig { @@ -1210,7 +1543,7 @@ type ServerKeyResponse struct { func (x *ServerKeyResponse) Reset() { *x = ServerKeyResponse{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[11] + mi := &file_management_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1223,7 +1556,7 @@ func (x *ServerKeyResponse) String() string { func (*ServerKeyResponse) ProtoMessage() {} func (x *ServerKeyResponse) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[11] + mi := &file_management_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1236,7 +1569,7 @@ func (x *ServerKeyResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ServerKeyResponse.ProtoReflect.Descriptor instead. func (*ServerKeyResponse) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{11} + return file_management_proto_rawDescGZIP(), []int{15} } func (x *ServerKeyResponse) GetKey() string { @@ -1269,7 +1602,7 @@ type Empty struct { func (x *Empty) Reset() { *x = Empty{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[12] + mi := &file_management_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1282,7 +1615,7 @@ func (x *Empty) String() string { func (*Empty) ProtoMessage() {} func (x *Empty) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[12] + mi := &file_management_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1295,7 +1628,7 @@ func (x *Empty) ProtoReflect() protoreflect.Message { // Deprecated: Use Empty.ProtoReflect.Descriptor instead. func (*Empty) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{12} + return file_management_proto_rawDescGZIP(), []int{16} } // NetbirdConfig is a common configuration of any Netbird peer. It contains STUN, TURN, Signal and Management servers configurations @@ -1317,7 +1650,7 @@ type NetbirdConfig struct { func (x *NetbirdConfig) Reset() { *x = NetbirdConfig{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[13] + mi := &file_management_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1330,7 +1663,7 @@ func (x *NetbirdConfig) String() string { func (*NetbirdConfig) ProtoMessage() {} func (x *NetbirdConfig) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[13] + mi := &file_management_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1343,7 +1676,7 @@ func (x *NetbirdConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use NetbirdConfig.ProtoReflect.Descriptor instead. func (*NetbirdConfig) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{13} + return file_management_proto_rawDescGZIP(), []int{17} } func (x *NetbirdConfig) GetStuns() []*HostConfig { @@ -1395,7 +1728,7 @@ type HostConfig struct { func (x *HostConfig) Reset() { *x = HostConfig{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[14] + mi := &file_management_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1408,7 +1741,7 @@ func (x *HostConfig) String() string { func (*HostConfig) ProtoMessage() {} func (x *HostConfig) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[14] + mi := &file_management_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1421,7 +1754,7 @@ func (x *HostConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use HostConfig.ProtoReflect.Descriptor instead. func (*HostConfig) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{14} + return file_management_proto_rawDescGZIP(), []int{18} } func (x *HostConfig) GetUri() string { @@ -1451,7 +1784,7 @@ type RelayConfig struct { func (x *RelayConfig) Reset() { *x = RelayConfig{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[15] + mi := &file_management_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1464,7 +1797,7 @@ func (x *RelayConfig) String() string { func (*RelayConfig) ProtoMessage() {} func (x *RelayConfig) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[15] + mi := &file_management_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1477,7 +1810,7 @@ func (x *RelayConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use RelayConfig.ProtoReflect.Descriptor instead. func (*RelayConfig) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{15} + return file_management_proto_rawDescGZIP(), []int{19} } func (x *RelayConfig) GetUrls() []string { @@ -1522,7 +1855,7 @@ type FlowConfig struct { func (x *FlowConfig) Reset() { *x = FlowConfig{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[16] + mi := &file_management_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1535,7 +1868,7 @@ func (x *FlowConfig) String() string { func (*FlowConfig) ProtoMessage() {} func (x *FlowConfig) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[16] + mi := &file_management_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1548,7 +1881,7 @@ func (x *FlowConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use FlowConfig.ProtoReflect.Descriptor instead. func (*FlowConfig) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{16} + return file_management_proto_rawDescGZIP(), []int{20} } func (x *FlowConfig) GetUrl() string { @@ -1626,7 +1959,7 @@ type JWTConfig struct { func (x *JWTConfig) Reset() { *x = JWTConfig{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[17] + mi := &file_management_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1639,7 +1972,7 @@ func (x *JWTConfig) String() string { func (*JWTConfig) ProtoMessage() {} func (x *JWTConfig) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[17] + mi := &file_management_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1652,7 +1985,7 @@ func (x *JWTConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use JWTConfig.ProtoReflect.Descriptor instead. func (*JWTConfig) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{17} + return file_management_proto_rawDescGZIP(), []int{21} } func (x *JWTConfig) GetIssuer() string { @@ -1705,7 +2038,7 @@ type ProtectedHostConfig struct { func (x *ProtectedHostConfig) Reset() { *x = ProtectedHostConfig{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[18] + mi := &file_management_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1718,7 +2051,7 @@ func (x *ProtectedHostConfig) String() string { func (*ProtectedHostConfig) ProtoMessage() {} func (x *ProtectedHostConfig) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[18] + mi := &file_management_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1731,7 +2064,7 @@ func (x *ProtectedHostConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use ProtectedHostConfig.ProtoReflect.Descriptor instead. func (*ProtectedHostConfig) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{18} + return file_management_proto_rawDescGZIP(), []int{22} } func (x *ProtectedHostConfig) GetHostConfig() *HostConfig { @@ -1780,7 +2113,7 @@ type PeerConfig struct { func (x *PeerConfig) Reset() { *x = PeerConfig{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[19] + mi := &file_management_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1793,7 +2126,7 @@ func (x *PeerConfig) String() string { func (*PeerConfig) ProtoMessage() {} func (x *PeerConfig) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[19] + mi := &file_management_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1806,7 +2139,7 @@ func (x *PeerConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use PeerConfig.ProtoReflect.Descriptor instead. func (*PeerConfig) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{19} + return file_management_proto_rawDescGZIP(), []int{23} } func (x *PeerConfig) GetAddress() string { @@ -1879,7 +2212,7 @@ type AutoUpdateSettings struct { func (x *AutoUpdateSettings) Reset() { *x = AutoUpdateSettings{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[20] + mi := &file_management_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1892,7 +2225,7 @@ func (x *AutoUpdateSettings) String() string { func (*AutoUpdateSettings) ProtoMessage() {} func (x *AutoUpdateSettings) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[20] + mi := &file_management_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1905,7 +2238,7 @@ func (x *AutoUpdateSettings) ProtoReflect() protoreflect.Message { // Deprecated: Use AutoUpdateSettings.ProtoReflect.Descriptor instead. func (*AutoUpdateSettings) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{20} + return file_management_proto_rawDescGZIP(), []int{24} } func (x *AutoUpdateSettings) GetVersion() string { @@ -1960,7 +2293,7 @@ type NetworkMap struct { func (x *NetworkMap) Reset() { *x = NetworkMap{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[21] + mi := &file_management_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1973,7 +2306,7 @@ func (x *NetworkMap) String() string { func (*NetworkMap) ProtoMessage() {} func (x *NetworkMap) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[21] + mi := &file_management_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1986,7 +2319,7 @@ func (x *NetworkMap) ProtoReflect() protoreflect.Message { // Deprecated: Use NetworkMap.ProtoReflect.Descriptor instead. func (*NetworkMap) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{21} + return file_management_proto_rawDescGZIP(), []int{25} } func (x *NetworkMap) GetSerial() uint64 { @@ -2096,7 +2429,7 @@ type SSHAuth struct { func (x *SSHAuth) Reset() { *x = SSHAuth{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[22] + mi := &file_management_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2109,7 +2442,7 @@ func (x *SSHAuth) String() string { func (*SSHAuth) ProtoMessage() {} func (x *SSHAuth) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[22] + mi := &file_management_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2122,7 +2455,7 @@ func (x *SSHAuth) ProtoReflect() protoreflect.Message { // Deprecated: Use SSHAuth.ProtoReflect.Descriptor instead. func (*SSHAuth) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{22} + return file_management_proto_rawDescGZIP(), []int{26} } func (x *SSHAuth) GetUserIDClaim() string { @@ -2157,7 +2490,7 @@ type MachineUserIndexes struct { func (x *MachineUserIndexes) Reset() { *x = MachineUserIndexes{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[23] + mi := &file_management_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2170,7 +2503,7 @@ func (x *MachineUserIndexes) String() string { func (*MachineUserIndexes) ProtoMessage() {} func (x *MachineUserIndexes) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[23] + mi := &file_management_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2183,7 +2516,7 @@ func (x *MachineUserIndexes) ProtoReflect() protoreflect.Message { // Deprecated: Use MachineUserIndexes.ProtoReflect.Descriptor instead. func (*MachineUserIndexes) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{23} + return file_management_proto_rawDescGZIP(), []int{27} } func (x *MachineUserIndexes) GetIndexes() []uint32 { @@ -2214,7 +2547,7 @@ type RemotePeerConfig struct { func (x *RemotePeerConfig) Reset() { *x = RemotePeerConfig{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[24] + mi := &file_management_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2227,7 +2560,7 @@ func (x *RemotePeerConfig) String() string { func (*RemotePeerConfig) ProtoMessage() {} func (x *RemotePeerConfig) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[24] + mi := &file_management_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2240,7 +2573,7 @@ func (x *RemotePeerConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use RemotePeerConfig.ProtoReflect.Descriptor instead. func (*RemotePeerConfig) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{24} + return file_management_proto_rawDescGZIP(), []int{28} } func (x *RemotePeerConfig) GetWgPubKey() string { @@ -2295,7 +2628,7 @@ type SSHConfig struct { func (x *SSHConfig) Reset() { *x = SSHConfig{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[25] + mi := &file_management_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2308,7 +2641,7 @@ func (x *SSHConfig) String() string { func (*SSHConfig) ProtoMessage() {} func (x *SSHConfig) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[25] + mi := &file_management_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2321,7 +2654,7 @@ func (x *SSHConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use SSHConfig.ProtoReflect.Descriptor instead. func (*SSHConfig) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{25} + return file_management_proto_rawDescGZIP(), []int{29} } func (x *SSHConfig) GetSshEnabled() bool { @@ -2355,7 +2688,7 @@ type DeviceAuthorizationFlowRequest struct { func (x *DeviceAuthorizationFlowRequest) Reset() { *x = DeviceAuthorizationFlowRequest{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[26] + mi := &file_management_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2368,7 +2701,7 @@ func (x *DeviceAuthorizationFlowRequest) String() string { func (*DeviceAuthorizationFlowRequest) ProtoMessage() {} func (x *DeviceAuthorizationFlowRequest) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[26] + mi := &file_management_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2381,7 +2714,7 @@ func (x *DeviceAuthorizationFlowRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeviceAuthorizationFlowRequest.ProtoReflect.Descriptor instead. func (*DeviceAuthorizationFlowRequest) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{26} + return file_management_proto_rawDescGZIP(), []int{30} } // DeviceAuthorizationFlow represents Device Authorization Flow information @@ -2400,7 +2733,7 @@ type DeviceAuthorizationFlow struct { func (x *DeviceAuthorizationFlow) Reset() { *x = DeviceAuthorizationFlow{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[27] + mi := &file_management_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2413,7 +2746,7 @@ func (x *DeviceAuthorizationFlow) String() string { func (*DeviceAuthorizationFlow) ProtoMessage() {} func (x *DeviceAuthorizationFlow) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[27] + mi := &file_management_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2426,7 +2759,7 @@ func (x *DeviceAuthorizationFlow) ProtoReflect() protoreflect.Message { // Deprecated: Use DeviceAuthorizationFlow.ProtoReflect.Descriptor instead. func (*DeviceAuthorizationFlow) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{27} + return file_management_proto_rawDescGZIP(), []int{31} } func (x *DeviceAuthorizationFlow) GetProvider() DeviceAuthorizationFlowProvider { @@ -2453,7 +2786,7 @@ type PKCEAuthorizationFlowRequest struct { func (x *PKCEAuthorizationFlowRequest) Reset() { *x = PKCEAuthorizationFlowRequest{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[28] + mi := &file_management_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2466,7 +2799,7 @@ func (x *PKCEAuthorizationFlowRequest) String() string { func (*PKCEAuthorizationFlowRequest) ProtoMessage() {} func (x *PKCEAuthorizationFlowRequest) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[28] + mi := &file_management_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2479,7 +2812,7 @@ func (x *PKCEAuthorizationFlowRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PKCEAuthorizationFlowRequest.ProtoReflect.Descriptor instead. func (*PKCEAuthorizationFlowRequest) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{28} + return file_management_proto_rawDescGZIP(), []int{32} } // PKCEAuthorizationFlow represents Authorization Code Flow information @@ -2496,7 +2829,7 @@ type PKCEAuthorizationFlow struct { func (x *PKCEAuthorizationFlow) Reset() { *x = PKCEAuthorizationFlow{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[29] + mi := &file_management_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2509,7 +2842,7 @@ func (x *PKCEAuthorizationFlow) String() string { func (*PKCEAuthorizationFlow) ProtoMessage() {} func (x *PKCEAuthorizationFlow) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[29] + mi := &file_management_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2522,7 +2855,7 @@ func (x *PKCEAuthorizationFlow) ProtoReflect() protoreflect.Message { // Deprecated: Use PKCEAuthorizationFlow.ProtoReflect.Descriptor instead. func (*PKCEAuthorizationFlow) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{29} + return file_management_proto_rawDescGZIP(), []int{33} } func (x *PKCEAuthorizationFlow) GetProviderConfig() *ProviderConfig { @@ -2568,7 +2901,7 @@ type ProviderConfig struct { func (x *ProviderConfig) Reset() { *x = ProviderConfig{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[30] + mi := &file_management_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2581,7 +2914,7 @@ func (x *ProviderConfig) String() string { func (*ProviderConfig) ProtoMessage() {} func (x *ProviderConfig) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[30] + mi := &file_management_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2594,7 +2927,7 @@ func (x *ProviderConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use ProviderConfig.ProtoReflect.Descriptor instead. func (*ProviderConfig) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{30} + return file_management_proto_rawDescGZIP(), []int{34} } func (x *ProviderConfig) GetClientID() string { @@ -2702,7 +3035,7 @@ type Route struct { func (x *Route) Reset() { *x = Route{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[31] + mi := &file_management_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2715,7 +3048,7 @@ func (x *Route) String() string { func (*Route) ProtoMessage() {} func (x *Route) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[31] + mi := &file_management_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2728,7 +3061,7 @@ func (x *Route) ProtoReflect() protoreflect.Message { // Deprecated: Use Route.ProtoReflect.Descriptor instead. func (*Route) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{31} + return file_management_proto_rawDescGZIP(), []int{35} } func (x *Route) GetID() string { @@ -2817,7 +3150,7 @@ type DNSConfig struct { func (x *DNSConfig) Reset() { *x = DNSConfig{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[32] + mi := &file_management_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2830,7 +3163,7 @@ func (x *DNSConfig) String() string { func (*DNSConfig) ProtoMessage() {} func (x *DNSConfig) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[32] + mi := &file_management_proto_msgTypes[36] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2843,7 +3176,7 @@ func (x *DNSConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use DNSConfig.ProtoReflect.Descriptor instead. func (*DNSConfig) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{32} + return file_management_proto_rawDescGZIP(), []int{36} } func (x *DNSConfig) GetServiceEnable() bool { @@ -2892,7 +3225,7 @@ type CustomZone struct { func (x *CustomZone) Reset() { *x = CustomZone{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[33] + mi := &file_management_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2905,7 +3238,7 @@ func (x *CustomZone) String() string { func (*CustomZone) ProtoMessage() {} func (x *CustomZone) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[33] + mi := &file_management_proto_msgTypes[37] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2918,7 +3251,7 @@ func (x *CustomZone) ProtoReflect() protoreflect.Message { // Deprecated: Use CustomZone.ProtoReflect.Descriptor instead. func (*CustomZone) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{33} + return file_management_proto_rawDescGZIP(), []int{37} } func (x *CustomZone) GetDomain() string { @@ -2965,7 +3298,7 @@ type SimpleRecord struct { func (x *SimpleRecord) Reset() { *x = SimpleRecord{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[34] + mi := &file_management_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2978,7 +3311,7 @@ func (x *SimpleRecord) String() string { func (*SimpleRecord) ProtoMessage() {} func (x *SimpleRecord) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[34] + mi := &file_management_proto_msgTypes[38] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2991,7 +3324,7 @@ func (x *SimpleRecord) ProtoReflect() protoreflect.Message { // Deprecated: Use SimpleRecord.ProtoReflect.Descriptor instead. func (*SimpleRecord) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{34} + return file_management_proto_rawDescGZIP(), []int{38} } func (x *SimpleRecord) GetName() string { @@ -3044,7 +3377,7 @@ type NameServerGroup struct { func (x *NameServerGroup) Reset() { *x = NameServerGroup{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[35] + mi := &file_management_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3057,7 +3390,7 @@ func (x *NameServerGroup) String() string { func (*NameServerGroup) ProtoMessage() {} func (x *NameServerGroup) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[35] + mi := &file_management_proto_msgTypes[39] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3070,7 +3403,7 @@ func (x *NameServerGroup) ProtoReflect() protoreflect.Message { // Deprecated: Use NameServerGroup.ProtoReflect.Descriptor instead. func (*NameServerGroup) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{35} + return file_management_proto_rawDescGZIP(), []int{39} } func (x *NameServerGroup) GetNameServers() []*NameServer { @@ -3115,7 +3448,7 @@ type NameServer struct { func (x *NameServer) Reset() { *x = NameServer{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[36] + mi := &file_management_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3128,7 +3461,7 @@ func (x *NameServer) String() string { func (*NameServer) ProtoMessage() {} func (x *NameServer) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[36] + mi := &file_management_proto_msgTypes[40] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3141,7 +3474,7 @@ func (x *NameServer) ProtoReflect() protoreflect.Message { // Deprecated: Use NameServer.ProtoReflect.Descriptor instead. func (*NameServer) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{36} + return file_management_proto_rawDescGZIP(), []int{40} } func (x *NameServer) GetIP() string { @@ -3184,7 +3517,7 @@ type FirewallRule struct { func (x *FirewallRule) Reset() { *x = FirewallRule{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[37] + mi := &file_management_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3197,7 +3530,7 @@ func (x *FirewallRule) String() string { func (*FirewallRule) ProtoMessage() {} func (x *FirewallRule) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[37] + mi := &file_management_proto_msgTypes[41] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3210,7 +3543,7 @@ func (x *FirewallRule) ProtoReflect() protoreflect.Message { // Deprecated: Use FirewallRule.ProtoReflect.Descriptor instead. func (*FirewallRule) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{37} + return file_management_proto_rawDescGZIP(), []int{41} } func (x *FirewallRule) GetPeerIP() string { @@ -3274,7 +3607,7 @@ type NetworkAddress struct { func (x *NetworkAddress) Reset() { *x = NetworkAddress{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[38] + mi := &file_management_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3287,7 +3620,7 @@ func (x *NetworkAddress) String() string { func (*NetworkAddress) ProtoMessage() {} func (x *NetworkAddress) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[38] + mi := &file_management_proto_msgTypes[42] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3300,7 +3633,7 @@ func (x *NetworkAddress) ProtoReflect() protoreflect.Message { // Deprecated: Use NetworkAddress.ProtoReflect.Descriptor instead. func (*NetworkAddress) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{38} + return file_management_proto_rawDescGZIP(), []int{42} } func (x *NetworkAddress) GetNetIP() string { @@ -3328,7 +3661,7 @@ type Checks struct { func (x *Checks) Reset() { *x = Checks{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[39] + mi := &file_management_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3341,7 +3674,7 @@ func (x *Checks) String() string { func (*Checks) ProtoMessage() {} func (x *Checks) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[39] + mi := &file_management_proto_msgTypes[43] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3354,7 +3687,7 @@ func (x *Checks) ProtoReflect() protoreflect.Message { // Deprecated: Use Checks.ProtoReflect.Descriptor instead. func (*Checks) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{39} + return file_management_proto_rawDescGZIP(), []int{43} } func (x *Checks) GetFiles() []string { @@ -3379,7 +3712,7 @@ type PortInfo struct { func (x *PortInfo) Reset() { *x = PortInfo{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[40] + mi := &file_management_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3392,7 +3725,7 @@ func (x *PortInfo) String() string { func (*PortInfo) ProtoMessage() {} func (x *PortInfo) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[40] + mi := &file_management_proto_msgTypes[44] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3405,7 +3738,7 @@ func (x *PortInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use PortInfo.ProtoReflect.Descriptor instead. func (*PortInfo) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{40} + return file_management_proto_rawDescGZIP(), []int{44} } func (m *PortInfo) GetPortSelection() isPortInfo_PortSelection { @@ -3476,7 +3809,7 @@ type RouteFirewallRule struct { func (x *RouteFirewallRule) Reset() { *x = RouteFirewallRule{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[41] + mi := &file_management_proto_msgTypes[45] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3489,7 +3822,7 @@ func (x *RouteFirewallRule) String() string { func (*RouteFirewallRule) ProtoMessage() {} func (x *RouteFirewallRule) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[41] + mi := &file_management_proto_msgTypes[45] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3502,7 +3835,7 @@ func (x *RouteFirewallRule) ProtoReflect() protoreflect.Message { // Deprecated: Use RouteFirewallRule.ProtoReflect.Descriptor instead. func (*RouteFirewallRule) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{41} + return file_management_proto_rawDescGZIP(), []int{45} } func (x *RouteFirewallRule) GetSourceRanges() []string { @@ -3593,7 +3926,7 @@ type ForwardingRule struct { func (x *ForwardingRule) Reset() { *x = ForwardingRule{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[42] + mi := &file_management_proto_msgTypes[46] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3606,7 +3939,7 @@ func (x *ForwardingRule) String() string { func (*ForwardingRule) ProtoMessage() {} func (x *ForwardingRule) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[42] + mi := &file_management_proto_msgTypes[46] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3619,7 +3952,7 @@ func (x *ForwardingRule) ProtoReflect() protoreflect.Message { // Deprecated: Use ForwardingRule.ProtoReflect.Descriptor instead. func (*ForwardingRule) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{42} + return file_management_proto_rawDescGZIP(), []int{46} } func (x *ForwardingRule) GetProtocol() RuleProtocol { @@ -3662,7 +3995,7 @@ type PortInfo_Range struct { func (x *PortInfo_Range) Reset() { *x = PortInfo_Range{} if protoimpl.UnsafeEnabled { - mi := &file_management_proto_msgTypes[44] + mi := &file_management_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3675,7 +4008,7 @@ func (x *PortInfo_Range) String() string { func (*PortInfo_Range) ProtoMessage() {} func (x *PortInfo_Range) ProtoReflect() protoreflect.Message { - mi := &file_management_proto_msgTypes[44] + mi := &file_management_proto_msgTypes[48] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3688,7 +4021,7 @@ func (x *PortInfo_Range) ProtoReflect() protoreflect.Message { // Deprecated: Use PortInfo_Range.ProtoReflect.Descriptor instead. func (*PortInfo_Range) Descriptor() ([]byte, []int) { - return file_management_proto_rawDescGZIP(), []int{40, 0} + return file_management_proto_rawDescGZIP(), []int{44, 0} } func (x *PortInfo_Range) GetStart() uint32 { @@ -3719,586 +4052,625 @@ var file_management_proto_rawDesc = []byte{ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x67, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x3d, 0x0a, - 0x0b, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x04, - 0x6d, 0x65, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x53, 0x79, 0x73, 0x74, - 0x65, 0x6d, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x22, 0xdb, 0x02, 0x0a, - 0x0c, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, - 0x0d, 0x6e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x2e, 0x4e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, - 0x0d, 0x6e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x36, - 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, - 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x65, 0x65, 0x72, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3e, 0x0a, 0x0b, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, - 0x50, 0x65, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, - 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x72, 0x65, 0x6d, 0x6f, 0x74, - 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, - 0x50, 0x65, 0x65, 0x72, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x12, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x49, - 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x36, 0x0a, 0x0a, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x4d, 0x61, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4d, - 0x61, 0x70, 0x52, 0x0a, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4d, 0x61, 0x70, 0x12, 0x2a, - 0x0a, 0x06, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, - 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x73, 0x52, 0x06, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x22, 0x41, 0x0a, 0x0f, 0x53, 0x79, - 0x6e, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, - 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x53, 0x79, 0x73, - 0x74, 0x65, 0x6d, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x22, 0xc6, 0x01, - 0x0a, 0x0c, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, - 0x0a, 0x08, 0x73, 0x65, 0x74, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x73, 0x65, 0x74, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x12, 0x2e, 0x0a, 0x04, 0x6d, 0x65, - 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, - 0x4d, 0x65, 0x74, 0x61, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x6a, 0x77, - 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x77, - 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x30, 0x0a, 0x08, 0x70, 0x65, 0x65, 0x72, 0x4b, 0x65, - 0x79, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x08, - 0x70, 0x65, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x6e, 0x73, 0x4c, - 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x64, 0x6e, 0x73, - 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x22, 0x44, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4b, 0x65, - 0x79, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x73, 0x68, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x73, 0x68, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, - 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x67, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x08, 0x77, 0x67, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x22, 0x3f, 0x0a, 0x0b, - 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, - 0x6c, 0x6f, 0x75, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x63, 0x6c, 0x6f, 0x75, - 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x22, 0x5c, 0x0a, - 0x04, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x78, 0x69, - 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x65, 0x78, 0x69, 0x73, 0x74, 0x12, - 0x2a, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x49, 0x73, 0x52, 0x75, 0x6e, 0x6e, - 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x63, 0x65, - 0x73, 0x73, 0x49, 0x73, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x22, 0xbf, 0x05, 0x0a, 0x05, - 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x2a, 0x0a, 0x10, 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, - 0x73, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x10, 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0x12, 0x30, 0x0a, 0x13, 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, 0x50, 0x65, - 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x6b, 0x0a, + 0x0a, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x49, + 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x49, 0x44, 0x12, 0x36, 0x0a, 0x06, 0x62, + 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x48, 0x00, 0x52, 0x06, 0x62, 0x75, 0x6e, + 0x64, 0x6c, 0x65, 0x42, 0x15, 0x0a, 0x13, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x22, 0xac, 0x01, 0x0a, 0x0b, 0x4a, + 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x49, 0x44, 0x12, 0x2d, 0x0a, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x52, 0x65, 0x61, + 0x73, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x52, 0x65, 0x61, 0x73, 0x6f, + 0x6e, 0x12, 0x32, 0x0a, 0x06, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x42, + 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x48, 0x00, 0x52, 0x06, 0x62, + 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x42, 0x12, 0x0a, 0x10, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, + 0x64, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x9d, 0x01, 0x0a, 0x10, 0x42, 0x75, + 0x6e, 0x64, 0x6c, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x1d, + 0x0a, 0x0a, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x09, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x26, 0x0a, + 0x0f, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x46, 0x6f, + 0x72, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x69, 0x6c, + 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x6c, + 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x61, + 0x6e, 0x6f, 0x6e, 0x79, 0x6d, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, + 0x61, 0x6e, 0x6f, 0x6e, 0x79, 0x6d, 0x69, 0x7a, 0x65, 0x22, 0x2d, 0x0a, 0x0c, 0x42, 0x75, 0x6e, + 0x64, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x75, 0x70, 0x6c, + 0x6f, 0x61, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x75, + 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x4b, 0x65, 0x79, 0x22, 0x3d, 0x0a, 0x0b, 0x53, 0x79, 0x6e, 0x63, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4d, 0x65, 0x74, + 0x61, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x22, 0xdb, 0x02, 0x0a, 0x0c, 0x53, 0x79, 0x6e, 0x63, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x0d, 0x6e, 0x65, 0x74, 0x62, + 0x69, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4e, 0x65, 0x74, + 0x62, 0x69, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0d, 0x6e, 0x65, 0x74, 0x62, + 0x69, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x36, 0x0a, 0x0a, 0x70, 0x65, 0x65, + 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x3e, 0x0a, 0x0b, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, + 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, + 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x72, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x12, 0x36, 0x0a, 0x0a, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4d, 0x61, 0x70, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4d, 0x61, 0x70, 0x52, 0x0a, 0x4e, + 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4d, 0x61, 0x70, 0x12, 0x2a, 0x0a, 0x06, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x52, 0x06, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x73, 0x22, 0x41, 0x0a, 0x0f, 0x53, 0x79, 0x6e, 0x63, 0x4d, 0x65, 0x74, + 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4d, 0x65, + 0x74, 0x61, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x22, 0xc6, 0x01, 0x0a, 0x0c, 0x4c, 0x6f, 0x67, + 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x74, + 0x75, 0x70, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x74, + 0x75, 0x70, 0x4b, 0x65, 0x79, 0x12, 0x2e, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x50, 0x65, 0x65, 0x72, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4d, 0x65, 0x74, 0x61, 0x52, + 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x6a, 0x77, 0x74, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x77, 0x74, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x12, 0x30, 0x0a, 0x08, 0x70, 0x65, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x73, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x50, 0x65, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x08, 0x70, 0x65, 0x65, 0x72, 0x4b, + 0x65, 0x79, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x6e, 0x73, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x64, 0x6e, 0x73, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x22, 0x44, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x1c, 0x0a, + 0x09, 0x73, 0x73, 0x68, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x09, 0x73, 0x73, 0x68, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x77, + 0x67, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x77, + 0x67, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x22, 0x3f, 0x0a, 0x0b, 0x45, 0x6e, 0x76, 0x69, 0x72, + 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x12, 0x1a, 0x0a, 0x08, + 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x22, 0x5c, 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, + 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x70, 0x61, 0x74, 0x68, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x78, 0x69, 0x73, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x05, 0x65, 0x78, 0x69, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x70, 0x72, + 0x6f, 0x63, 0x65, 0x73, 0x73, 0x49, 0x73, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x49, 0x73, 0x52, + 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x22, 0xbf, 0x05, 0x0a, 0x05, 0x46, 0x6c, 0x61, 0x67, 0x73, + 0x12, 0x2a, 0x0a, 0x10, 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, 0x45, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x6f, 0x73, 0x65, + 0x6e, 0x70, 0x61, 0x73, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x30, 0x0a, 0x13, 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, - 0x69, 0x76, 0x65, 0x12, 0x2a, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x53, 0x48, - 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x53, 0x48, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x12, - 0x30, 0x0a, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x64, 0x69, + 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x72, 0x6f, 0x73, 0x65, 0x6e, + 0x70, 0x61, 0x73, 0x73, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x76, 0x65, 0x12, 0x2a, + 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x53, 0x48, 0x41, 0x6c, 0x6c, 0x6f, 0x77, + 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x53, 0x53, 0x48, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x12, 0x30, 0x0a, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, - 0x73, 0x12, 0x30, 0x0a, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x30, 0x0a, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x6f, 0x75, - 0x74, 0x65, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x4e, - 0x53, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, - 0x44, 0x4e, 0x53, 0x12, 0x28, 0x0a, 0x0f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, - 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x64, 0x69, - 0x73, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x12, 0x26, 0x0a, - 0x0e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x41, 0x4e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, - 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x41, 0x4e, 0x41, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x6e, - 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x62, 0x6c, 0x6f, - 0x63, 0x6b, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x34, 0x0a, 0x15, 0x6c, 0x61, 0x7a, - 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, - 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x6c, 0x61, 0x7a, 0x79, 0x43, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, - 0x24, 0x0a, 0x0d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x52, 0x6f, 0x6f, 0x74, - 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, - 0x48, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x24, 0x0a, 0x0d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, - 0x53, 0x48, 0x53, 0x46, 0x54, 0x50, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x53, 0x46, 0x54, 0x50, 0x12, 0x42, 0x0a, 0x1c, 0x65, - 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x50, 0x6f, 0x72, - 0x74, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x0d, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x1c, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x4c, 0x6f, 0x63, 0x61, - 0x6c, 0x50, 0x6f, 0x72, 0x74, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x12, - 0x44, 0x0a, 0x1d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x52, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, + 0x6c, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x1e, + 0x0a, 0x0a, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x4e, 0x53, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0a, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x4e, 0x53, 0x12, 0x28, + 0x0a, 0x0f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, + 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, + 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x12, 0x26, 0x0a, 0x0e, 0x62, 0x6c, 0x6f, 0x63, + 0x6b, 0x4c, 0x41, 0x4e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x41, 0x4e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x12, 0x22, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x6e, 0x62, + 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x34, 0x0a, 0x15, 0x6c, 0x61, 0x7a, 0x79, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x15, 0x6c, 0x61, 0x7a, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x24, 0x0a, 0x0d, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x52, 0x6f, 0x6f, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x52, 0x6f, 0x6f, 0x74, + 0x12, 0x24, 0x0a, 0x0d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x53, 0x46, 0x54, + 0x50, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, + 0x53, 0x48, 0x53, 0x46, 0x54, 0x50, 0x12, 0x42, 0x0a, 0x1c, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x53, 0x53, 0x48, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x50, 0x6f, 0x72, 0x74, 0x46, 0x6f, 0x72, 0x77, + 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1c, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x50, 0x6f, 0x72, 0x74, + 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x44, 0x0a, 0x1d, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x6f, 0x72, + 0x74, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x0e, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x1d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, - 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, - 0x48, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x46, 0x6f, 0x72, 0x77, 0x61, - 0x72, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x26, 0x0a, 0x0e, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, - 0x53, 0x53, 0x48, 0x41, 0x75, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x64, - 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x41, 0x75, 0x74, 0x68, 0x22, 0xf2, 0x04, - 0x0a, 0x0e, 0x50, 0x65, 0x65, 0x72, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4d, 0x65, 0x74, 0x61, - 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, - 0x67, 0x6f, 0x4f, 0x53, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x67, 0x6f, 0x4f, 0x53, - 0x12, 0x16, 0x0a, 0x06, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x65, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x65, 0x12, 0x1a, 0x0a, 0x08, - 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x12, 0x0e, 0x0a, 0x02, 0x4f, 0x53, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x4f, 0x53, 0x12, 0x26, 0x0a, 0x0e, 0x6e, 0x65, 0x74, 0x62, - 0x69, 0x72, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0e, 0x6e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x1c, 0x0a, 0x09, 0x75, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x75, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x24, - 0x0a, 0x0d, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x4f, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x4f, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x12, 0x46, 0x0a, 0x10, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, 0x64, 0x64, - 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x10, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0f, 0x73, 0x79, - 0x73, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x0c, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x79, 0x73, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, - 0x6d, 0x62, 0x65, 0x72, 0x12, 0x26, 0x0a, 0x0e, 0x73, 0x79, 0x73, 0x50, 0x72, 0x6f, 0x64, 0x75, - 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x79, - 0x73, 0x50, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x0f, - 0x73, 0x79, 0x73, 0x4d, 0x61, 0x6e, 0x75, 0x66, 0x61, 0x63, 0x74, 0x75, 0x72, 0x65, 0x72, 0x18, - 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x79, 0x73, 0x4d, 0x61, 0x6e, 0x75, 0x66, 0x61, - 0x63, 0x74, 0x75, 0x72, 0x65, 0x72, 0x12, 0x39, 0x0a, 0x0b, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, - 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, - 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x0b, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, - 0x74, 0x12, 0x26, 0x0a, 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x10, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x46, 0x69, - 0x6c, 0x65, 0x52, 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x05, 0x66, 0x6c, 0x61, - 0x67, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x52, 0x05, 0x66, 0x6c, 0x61, - 0x67, 0x73, 0x22, 0xb4, 0x01, 0x0a, 0x0d, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x0d, 0x6e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0d, 0x6e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x36, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2a, 0x0a, - 0x06, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x73, 0x52, 0x06, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x22, 0x79, 0x0a, 0x11, 0x53, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x38, 0x0a, 0x09, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, 0x74, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, - 0x09, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0xff, 0x01, - 0x0a, 0x0d, 0x4e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x2c, 0x0a, 0x05, 0x73, 0x74, 0x75, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, - 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x6f, 0x73, 0x74, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x73, 0x74, 0x75, 0x6e, 0x73, 0x12, 0x35, 0x0a, - 0x05, 0x74, 0x75, 0x72, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, - 0x74, 0x65, 0x64, 0x48, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x74, - 0x75, 0x72, 0x6e, 0x73, 0x12, 0x2e, 0x0a, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x18, 0x03, + 0x12, 0x26, 0x0a, 0x0e, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x53, 0x48, 0x41, 0x75, + 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, + 0x65, 0x53, 0x53, 0x48, 0x41, 0x75, 0x74, 0x68, 0x22, 0xf2, 0x04, 0x0a, 0x0e, 0x50, 0x65, 0x65, + 0x72, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x68, + 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, + 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x6f, 0x4f, 0x53, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x67, 0x6f, 0x4f, 0x53, 0x12, 0x16, 0x0a, 0x06, 0x6b, + 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6b, 0x65, 0x72, + 0x6e, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, + 0x6f, 0x72, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, + 0x6f, 0x72, 0x6d, 0x12, 0x0e, 0x0a, 0x02, 0x4f, 0x53, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x02, 0x4f, 0x53, 0x12, 0x26, 0x0a, 0x0e, 0x6e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6e, 0x65, 0x74, + 0x62, 0x69, 0x72, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x75, + 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x75, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0d, 0x6b, 0x65, 0x72, + 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x1c, 0x0a, 0x09, 0x4f, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x4f, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x0a, + 0x10, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, + 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x52, 0x10, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0f, 0x73, 0x79, 0x73, 0x53, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, + 0x73, 0x79, 0x73, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, + 0x26, 0x0a, 0x0e, 0x73, 0x79, 0x73, 0x50, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x74, 0x4e, 0x61, 0x6d, + 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x79, 0x73, 0x50, 0x72, 0x6f, 0x64, + 0x75, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x0f, 0x73, 0x79, 0x73, 0x4d, 0x61, + 0x6e, 0x75, 0x66, 0x61, 0x63, 0x74, 0x75, 0x72, 0x65, 0x72, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0f, 0x73, 0x79, 0x73, 0x4d, 0x61, 0x6e, 0x75, 0x66, 0x61, 0x63, 0x74, 0x75, 0x72, 0x65, + 0x72, 0x12, 0x39, 0x0a, 0x0b, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, + 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x52, + 0x0b, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x05, + 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x05, 0x66, + 0x69, 0x6c, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x11, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x22, 0xb4, 0x01, + 0x0a, 0x0d, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x3f, 0x0a, 0x0d, 0x6e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x4e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x0d, 0x6e, 0x65, 0x74, 0x62, 0x69, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x36, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x73, 0x69, - 0x67, 0x6e, 0x61, 0x6c, 0x12, 0x2d, 0x0a, 0x05, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x2e, 0x52, 0x65, 0x6c, 0x61, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x72, 0x65, - 0x6c, 0x61, 0x79, 0x12, 0x2a, 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x46, - 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x22, - 0x98, 0x01, 0x0a, 0x0a, 0x48, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x10, - 0x0a, 0x03, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x69, - 0x12, 0x3b, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, - 0x48, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0x3b, 0x0a, - 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x44, 0x50, - 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x48, - 0x54, 0x54, 0x50, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x53, 0x10, 0x03, - 0x12, 0x08, 0x0a, 0x04, 0x44, 0x54, 0x4c, 0x53, 0x10, 0x04, 0x22, 0x6d, 0x0a, 0x0b, 0x52, 0x65, - 0x6c, 0x61, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x72, 0x6c, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x75, 0x72, 0x6c, 0x73, 0x12, 0x22, 0x0a, - 0x0c, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0c, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, - 0x64, 0x12, 0x26, 0x0a, 0x0e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, - 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0xad, 0x02, 0x0a, 0x0a, 0x46, 0x6c, - 0x6f, 0x77, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x22, 0x0a, 0x0c, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x26, - 0x0a, 0x0e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x53, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, - 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x18, 0x0a, - 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, - 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x65, 0x78, 0x69, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x43, - 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x12, 0x65, 0x78, 0x69, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0d, 0x64, 0x6e, 0x73, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x64, 0x6e, 0x73, 0x43, - 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa3, 0x01, 0x0a, 0x09, 0x4a, 0x57, - 0x54, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x12, - 0x1a, 0x0a, 0x08, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x6b, - 0x65, 0x79, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0c, 0x6b, 0x65, 0x79, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x20, 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x41, 0x67, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x41, 0x67, - 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x05, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x22, - 0x7d, 0x0a, 0x13, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x65, 0x64, 0x48, 0x6f, 0x73, 0x74, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x36, 0x0a, 0x0a, 0x68, 0x6f, 0x73, 0x74, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x52, 0x0a, 0x68, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, - 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x73, - 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0xd3, - 0x02, 0x0a, 0x0a, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, - 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6e, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x09, 0x73, 0x73, 0x68, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x53, 0x48, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x52, 0x09, 0x73, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, - 0x0a, 0x04, 0x66, 0x71, 0x64, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x71, - 0x64, 0x6e, 0x12, 0x48, 0x0a, 0x1f, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x65, 0x65, - 0x72, 0x44, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1f, 0x52, 0x6f, 0x75, - 0x74, 0x69, 0x6e, 0x67, 0x50, 0x65, 0x65, 0x72, 0x44, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x34, 0x0a, 0x15, - 0x4c, 0x61, 0x7a, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x4c, 0x61, 0x7a, - 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, - 0x65, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x74, 0x75, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x03, 0x6d, 0x74, 0x75, 0x12, 0x3e, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x22, 0x52, 0x0a, 0x12, 0x41, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x77, 0x61, - 0x79, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x22, 0xe8, 0x05, 0x0a, 0x0a, 0x4e, 0x65, 0x74, - 0x77, 0x6f, 0x72, 0x6b, 0x4d, 0x61, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x53, 0x65, 0x72, 0x69, 0x61, - 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, - 0x36, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x2e, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x65, 0x65, - 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3e, 0x0a, 0x0b, 0x72, 0x65, 0x6d, 0x6f, 0x74, - 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, - 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x72, 0x65, 0x6d, 0x6f, - 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x72, 0x65, 0x6d, 0x6f, 0x74, - 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x12, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, - 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x29, 0x0a, 0x06, 0x52, 0x6f, 0x75, 0x74, 0x65, - 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x06, 0x52, 0x6f, 0x75, 0x74, - 0x65, 0x73, 0x12, 0x33, 0x0a, 0x09, 0x44, 0x4e, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x44, 0x4e, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x44, 0x4e, - 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x40, 0x0a, 0x0c, 0x6f, 0x66, 0x66, 0x6c, 0x69, - 0x6e, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, - 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x6f, 0x66, 0x66, - 0x6c, 0x69, 0x6e, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x3e, 0x0a, 0x0d, 0x46, 0x69, 0x72, - 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x46, 0x69, - 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x0d, 0x46, 0x69, 0x72, 0x65, - 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x66, 0x69, 0x72, - 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x66, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, - 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4f, 0x0a, - 0x13, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, - 0x75, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x46, 0x69, 0x72, - 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x13, 0x72, 0x6f, 0x75, 0x74, 0x65, - 0x73, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x3e, - 0x0a, 0x1a, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, - 0x52, 0x75, 0x6c, 0x65, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x0b, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x1a, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, - 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x44, - 0x0a, 0x0f, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, - 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, - 0x75, 0x6c, 0x65, 0x52, 0x0f, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, - 0x75, 0x6c, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x07, 0x73, 0x73, 0x68, 0x41, 0x75, 0x74, 0x68, 0x18, - 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x53, 0x53, 0x48, 0x41, 0x75, 0x74, 0x68, 0x52, 0x07, 0x73, 0x73, 0x68, 0x41, - 0x75, 0x74, 0x68, 0x22, 0x82, 0x02, 0x0a, 0x07, 0x53, 0x53, 0x48, 0x41, 0x75, 0x74, 0x68, 0x12, - 0x20, 0x0a, 0x0b, 0x55, 0x73, 0x65, 0x72, 0x49, 0x44, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x55, 0x73, 0x65, 0x72, 0x49, 0x44, 0x43, 0x6c, 0x61, 0x69, - 0x6d, 0x12, 0x28, 0x0a, 0x0f, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x55, - 0x73, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0f, 0x41, 0x75, 0x74, 0x68, - 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x73, 0x12, 0x4a, 0x0a, 0x0d, 0x6d, - 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, - 0x53, 0x53, 0x48, 0x41, 0x75, 0x74, 0x68, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x55, - 0x73, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x6d, 0x61, 0x63, 0x68, 0x69, - 0x6e, 0x65, 0x55, 0x73, 0x65, 0x72, 0x73, 0x1a, 0x5f, 0x0a, 0x11, 0x4d, 0x61, 0x63, 0x68, 0x69, - 0x6e, 0x65, 0x55, 0x73, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x34, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, - 0x6e, 0x65, 0x55, 0x73, 0x65, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x2e, 0x0a, 0x12, 0x4d, 0x61, 0x63, 0x68, - 0x69, 0x6e, 0x65, 0x55, 0x73, 0x65, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x12, 0x18, - 0x0a, 0x07, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0d, 0x52, - 0x07, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x22, 0xbb, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x6d, - 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1a, 0x0a, - 0x08, 0x77, 0x67, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x77, 0x67, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x12, 0x1e, 0x0a, 0x0a, 0x61, 0x6c, 0x6c, - 0x6f, 0x77, 0x65, 0x64, 0x49, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x61, - 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x49, 0x70, 0x73, 0x12, 0x33, 0x0a, 0x09, 0x73, 0x73, 0x68, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x53, 0x48, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x52, 0x09, 0x73, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, - 0x0a, 0x04, 0x66, 0x71, 0x64, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x71, - 0x64, 0x6e, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x7e, 0x0a, 0x09, 0x53, 0x53, 0x48, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x73, 0x68, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x73, 0x73, 0x68, 0x45, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x73, 0x68, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x73, 0x68, 0x50, 0x75, 0x62, 0x4b, 0x65, - 0x79, 0x12, 0x33, 0x0a, 0x09, 0x6a, 0x77, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x2e, 0x4a, 0x57, 0x54, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x6a, 0x77, 0x74, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x20, 0x0a, 0x1e, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, - 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, - 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xbf, 0x01, 0x0a, 0x17, 0x44, 0x65, 0x76, - 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x48, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, - 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x2e, 0x70, 0x72, 0x6f, 0x76, - 0x69, 0x64, 0x65, 0x72, 0x52, 0x08, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x42, + 0x74, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x65, + 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2a, 0x0a, 0x06, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x52, 0x06, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x73, 0x22, 0x79, 0x0a, 0x11, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4b, 0x65, + 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x38, 0x0a, 0x09, 0x65, + 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x65, 0x78, 0x70, 0x69, + 0x72, 0x65, 0x73, 0x41, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, + 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0xff, 0x01, 0x0a, 0x0d, 0x4e, 0x65, 0x74, + 0x62, 0x69, 0x72, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2c, 0x0a, 0x05, 0x73, 0x74, + 0x75, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x05, 0x73, 0x74, 0x75, 0x6e, 0x73, 0x12, 0x35, 0x0a, 0x05, 0x74, 0x75, 0x72, 0x6e, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x65, 0x64, 0x48, 0x6f, + 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x74, 0x75, 0x72, 0x6e, 0x73, 0x12, + 0x2e, 0x0a, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x6f, 0x73, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x12, + 0x2d, 0x0a, 0x05, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x65, 0x6c, 0x61, + 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x2a, + 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x22, 0x98, 0x01, 0x0a, 0x0a, 0x48, + 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x69, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x69, 0x12, 0x3b, 0x0a, 0x08, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0x3b, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x00, 0x12, 0x07, 0x0a, + 0x03, 0x54, 0x43, 0x50, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x54, 0x54, 0x50, 0x10, 0x02, + 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x53, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x44, + 0x54, 0x4c, 0x53, 0x10, 0x04, 0x22, 0x6d, 0x0a, 0x0b, 0x52, 0x65, 0x6c, 0x61, 0x79, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x72, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x04, 0x75, 0x72, 0x6c, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x26, 0x0a, 0x0e, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x22, 0xad, 0x02, 0x0a, 0x0a, 0x46, 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x22, 0x0a, 0x0c, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x61, + 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x74, 0x6f, 0x6b, + 0x65, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x26, 0x0a, 0x0e, 0x74, 0x6f, 0x6b, + 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x12, 0x35, 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x12, 0x2e, + 0x0a, 0x12, 0x65, 0x78, 0x69, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x65, 0x78, 0x69, 0x74, + 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x24, + 0x0a, 0x0d, 0x64, 0x6e, 0x73, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x64, 0x6e, 0x73, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa3, 0x01, 0x0a, 0x09, 0x4a, 0x57, 0x54, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x75, + 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x75, + 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x6b, 0x65, 0x79, 0x73, 0x4c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6b, 0x65, + 0x79, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x6d, 0x61, + 0x78, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x41, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0b, 0x6d, 0x61, 0x78, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x41, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, + 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x09, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x22, 0x7d, 0x0a, 0x13, 0x50, 0x72, + 0x6f, 0x74, 0x65, 0x63, 0x74, 0x65, 0x64, 0x48, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x36, 0x0a, 0x0a, 0x68, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x68, + 0x6f, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x12, 0x1a, 0x0a, + 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0xd3, 0x02, 0x0a, 0x0a, 0x50, 0x65, + 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x64, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x09, 0x73, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x53, 0x48, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, + 0x73, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x71, 0x64, + 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x71, 0x64, 0x6e, 0x12, 0x48, 0x0a, + 0x1f, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x65, 0x65, 0x72, 0x44, 0x6e, 0x73, 0x52, + 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1f, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, + 0x65, 0x65, 0x72, 0x44, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, + 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x34, 0x0a, 0x15, 0x4c, 0x61, 0x7a, 0x79, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x4c, 0x61, 0x7a, 0x79, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x10, 0x0a, + 0x03, 0x6d, 0x74, 0x75, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6d, 0x74, 0x75, 0x12, + 0x3e, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x22, + 0x52, 0x0a, 0x12, 0x41, 0x75, 0x74, 0x6f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x22, 0x0a, 0x0c, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x22, 0xe8, 0x05, 0x0a, 0x0a, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4d, + 0x61, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x06, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x36, 0x0a, 0x0a, 0x70, 0x65, + 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x65, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x3e, 0x0a, 0x0b, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, + 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, + 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, + 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x12, 0x29, 0x0a, 0x06, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x06, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x33, 0x0a, + 0x09, 0x44, 0x4e, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x4e, + 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x44, 0x4e, 0x53, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x40, 0x0a, 0x0c, 0x6f, 0x66, 0x66, 0x6c, 0x69, 0x6e, 0x65, 0x50, 0x65, 0x65, + 0x72, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x6f, 0x66, 0x66, 0x6c, 0x69, 0x6e, 0x65, 0x50, + 0x65, 0x65, 0x72, 0x73, 0x12, 0x3e, 0x0a, 0x0d, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, + 0x52, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, + 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x0d, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, + 0x75, 0x6c, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x66, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, + 0x52, 0x75, 0x6c, 0x65, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x14, 0x66, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, + 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4f, 0x0a, 0x13, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x73, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x18, + 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, + 0x52, 0x75, 0x6c, 0x65, 0x52, 0x13, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x46, 0x69, 0x72, 0x65, + 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x3e, 0x0a, 0x1a, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x73, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x73, + 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x73, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, + 0x65, 0x73, 0x49, 0x73, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x44, 0x0a, 0x0f, 0x66, 0x6f, 0x72, + 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x0f, + 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, + 0x2d, 0x0a, 0x07, 0x73, 0x73, 0x68, 0x41, 0x75, 0x74, 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x13, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x53, + 0x48, 0x41, 0x75, 0x74, 0x68, 0x52, 0x07, 0x73, 0x73, 0x68, 0x41, 0x75, 0x74, 0x68, 0x22, 0x82, + 0x02, 0x0a, 0x07, 0x53, 0x53, 0x48, 0x41, 0x75, 0x74, 0x68, 0x12, 0x20, 0x0a, 0x0b, 0x55, 0x73, + 0x65, 0x72, 0x49, 0x44, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x55, 0x73, 0x65, 0x72, 0x49, 0x44, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x12, 0x28, 0x0a, 0x0f, + 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0f, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, + 0x64, 0x55, 0x73, 0x65, 0x72, 0x73, 0x12, 0x4a, 0x0a, 0x0d, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, + 0x65, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x53, 0x48, 0x41, 0x75, + 0x74, 0x68, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x55, 0x73, 0x65, 0x72, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x55, 0x73, 0x65, + 0x72, 0x73, 0x1a, 0x5f, 0x0a, 0x11, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x55, 0x73, 0x65, + 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x34, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x55, 0x73, 0x65, + 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x22, 0x2e, 0x0a, 0x12, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x55, 0x73, + 0x65, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x69, 0x6e, 0x64, + 0x65, 0x78, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x07, 0x69, 0x6e, 0x64, 0x65, + 0x78, 0x65, 0x73, 0x22, 0xbb, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x65, + 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x67, 0x50, 0x75, + 0x62, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x67, 0x50, 0x75, + 0x62, 0x4b, 0x65, 0x79, 0x12, 0x1e, 0x0a, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x49, + 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, + 0x64, 0x49, 0x70, 0x73, 0x12, 0x33, 0x0a, 0x09, 0x73, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x53, 0x48, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, + 0x73, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x71, 0x64, + 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x71, 0x64, 0x6e, 0x12, 0x22, 0x0a, + 0x0c, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x22, 0x7e, 0x0a, 0x09, 0x53, 0x53, 0x48, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1e, + 0x0a, 0x0a, 0x73, 0x73, 0x68, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0a, 0x73, 0x73, 0x68, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1c, + 0x0a, 0x09, 0x73, 0x73, 0x68, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x09, 0x73, 0x73, 0x68, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x12, 0x33, 0x0a, 0x09, + 0x6a, 0x77, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4a, 0x57, 0x54, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x6a, 0x77, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x22, 0x20, 0x0a, 0x1e, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x22, 0xbf, 0x01, 0x0a, 0x17, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, + 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, + 0x48, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x2c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x44, + 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x2e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x52, + 0x08, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x0e, 0x50, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x50, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x16, 0x0a, + 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x0a, 0x0a, 0x06, 0x48, 0x4f, 0x53, + 0x54, 0x45, 0x44, 0x10, 0x00, 0x22, 0x1e, 0x0a, 0x1c, 0x50, 0x4b, 0x43, 0x45, 0x41, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x5b, 0x0a, 0x15, 0x50, 0x4b, 0x43, 0x45, 0x41, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x42, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x22, 0x16, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x0a, - 0x0a, 0x06, 0x48, 0x4f, 0x53, 0x54, 0x45, 0x44, 0x10, 0x00, 0x22, 0x1e, 0x0a, 0x1c, 0x50, 0x4b, - 0x43, 0x45, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, - 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x5b, 0x0a, 0x15, 0x50, 0x4b, - 0x43, 0x45, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, - 0x6c, 0x6f, 0x77, 0x12, 0x42, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, - 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, - 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xb8, 0x03, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x76, - 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1a, 0x0a, 0x08, 0x43, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x43, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x12, 0x22, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x43, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x44, 0x6f, - 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x44, 0x6f, 0x6d, 0x61, - 0x69, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x41, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x41, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x2e, - 0x0a, 0x12, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x45, 0x6e, 0x64, 0x70, - 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x44, 0x65, 0x76, 0x69, - 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x24, - 0x0a, 0x0d, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x6e, 0x64, 0x70, - 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x55, 0x73, - 0x65, 0x49, 0x44, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, - 0x55, 0x73, 0x65, 0x49, 0x44, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x34, 0x0a, 0x15, 0x41, 0x75, - 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, - 0x69, 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x41, 0x75, 0x74, 0x68, 0x6f, - 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, - 0x12, 0x22, 0x0a, 0x0c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x52, 0x4c, 0x73, - 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, - 0x55, 0x52, 0x4c, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x50, - 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x12, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x4c, - 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x46, 0x6c, 0x61, - 0x67, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x46, 0x6c, - 0x61, 0x67, 0x22, 0x93, 0x02, 0x0a, 0x05, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, - 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, - 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x4e, - 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x54, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x4e, 0x65, 0x74, - 0x77, 0x6f, 0x72, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x65, 0x65, 0x72, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x50, 0x65, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, - 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x4d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x12, 0x1e, 0x0a, 0x0a, 0x4d, 0x61, 0x73, 0x71, 0x75, 0x65, 0x72, 0x61, - 0x64, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x4d, 0x61, 0x73, 0x71, 0x75, 0x65, - 0x72, 0x61, 0x64, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x4e, 0x65, 0x74, 0x49, 0x44, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x4e, 0x65, 0x74, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x44, 0x6f, - 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x44, 0x6f, 0x6d, - 0x61, 0x69, 0x6e, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x6f, 0x75, 0x74, - 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x6f, 0x75, - 0x74, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x41, 0x75, 0x74, 0x6f, 0x41, 0x70, - 0x70, 0x6c, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x41, - 0x75, 0x74, 0x6f, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x22, 0xde, 0x01, 0x0a, 0x09, 0x44, 0x4e, 0x53, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x24, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x47, 0x0a, 0x10, + 0x69, 0x67, 0x22, 0xb8, 0x03, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1a, 0x0a, 0x08, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, + 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, + 0x44, 0x12, 0x22, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, + 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x1a, 0x0a, + 0x08, 0x41, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x41, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x2e, 0x0a, 0x12, 0x44, 0x65, 0x76, + 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, + 0x68, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0d, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, + 0x14, 0x0a, 0x05, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x53, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x55, 0x73, 0x65, 0x49, 0x44, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x55, 0x73, 0x65, 0x49, 0x44, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x34, 0x0a, 0x15, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x52, + 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x52, 0x4c, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x52, 0x4c, 0x73, 0x12, + 0x2e, 0x0a, 0x12, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x6d, 0x70, 0x74, + 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x44, 0x69, 0x73, + 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, + 0x1c, 0x0a, 0x09, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x46, 0x6c, 0x61, 0x67, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x09, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x46, 0x6c, 0x61, 0x67, 0x22, 0x93, 0x02, + 0x0a, 0x05, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x4e, 0x65, 0x74, 0x77, 0x6f, + 0x72, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x54, 0x79, 0x70, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x65, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x50, 0x65, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, + 0x1e, 0x0a, 0x0a, 0x4d, 0x61, 0x73, 0x71, 0x75, 0x65, 0x72, 0x61, 0x64, 0x65, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0a, 0x4d, 0x61, 0x73, 0x71, 0x75, 0x65, 0x72, 0x61, 0x64, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x4e, 0x65, 0x74, 0x49, 0x44, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x4e, 0x65, 0x74, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, + 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, + 0x1c, 0x0a, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x24, 0x0a, + 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x41, 0x75, 0x74, 0x6f, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x18, 0x0a, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x41, 0x75, 0x74, 0x6f, 0x41, 0x70, + 0x70, 0x6c, 0x79, 0x22, 0xde, 0x01, 0x0a, 0x09, 0x44, 0x4e, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x24, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x45, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x47, 0x0a, 0x10, 0x4e, 0x61, 0x6d, 0x65, 0x53, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1b, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4e, + 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x10, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, - 0x6f, 0x75, 0x70, 0x52, 0x10, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, - 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x38, 0x0a, 0x0b, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, - 0x6f, 0x6e, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, - 0x6e, 0x65, 0x52, 0x0b, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x12, - 0x28, 0x0a, 0x0d, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0d, 0x46, 0x6f, 0x72, 0x77, - 0x61, 0x72, 0x64, 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, 0x22, 0xb8, 0x01, 0x0a, 0x0a, 0x43, 0x75, - 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x44, 0x6f, 0x6d, 0x61, - 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, - 0x12, 0x32, 0x0a, 0x07, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, - 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x07, 0x52, 0x65, 0x63, - 0x6f, 0x72, 0x64, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, - 0x6d, 0x61, 0x69, 0x6e, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, - 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x2a, 0x0a, 0x10, 0x4e, 0x6f, 0x6e, 0x41, - 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x10, 0x4e, 0x6f, 0x6e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61, - 0x74, 0x69, 0x76, 0x65, 0x22, 0x74, 0x0a, 0x0c, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, - 0x63, 0x6f, 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x43, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x43, 0x6c, 0x61, - 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x03, 0x54, 0x54, 0x4c, 0x12, 0x14, 0x0a, 0x05, 0x52, 0x44, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x52, 0x44, 0x61, 0x74, 0x61, 0x22, 0xb3, 0x01, 0x0a, 0x0f, 0x4e, - 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x38, - 0x0a, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x0b, 0x4e, 0x61, 0x6d, - 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x50, 0x72, 0x69, 0x6d, - 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x50, 0x72, 0x69, 0x6d, 0x61, - 0x72, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x07, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x32, 0x0a, 0x14, - 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x45, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x53, 0x65, 0x61, 0x72, - 0x63, 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, - 0x22, 0x48, 0x0a, 0x0a, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x0e, - 0x0a, 0x02, 0x49, 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x50, 0x12, 0x16, - 0x0a, 0x06, 0x4e, 0x53, 0x54, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, - 0x4e, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x22, 0xa7, 0x02, 0x0a, 0x0c, 0x46, - 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x50, - 0x65, 0x65, 0x72, 0x49, 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x65, 0x65, - 0x72, 0x49, 0x50, 0x12, 0x37, 0x0a, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x06, - 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x41, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x08, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, - 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, - 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x30, 0x0a, 0x08, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, - 0x66, 0x6f, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, - 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x49, 0x44, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x49, 0x44, 0x22, 0x38, 0x0a, 0x0e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, - 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x65, 0x74, 0x49, 0x50, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x65, 0x74, 0x49, 0x50, 0x12, 0x10, 0x0a, 0x03, - 0x6d, 0x61, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6d, 0x61, 0x63, 0x22, 0x1e, - 0x0a, 0x06, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x46, 0x69, 0x6c, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x22, 0x96, - 0x01, 0x0a, 0x08, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x14, 0x0a, 0x04, 0x70, - 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x04, 0x70, 0x6f, 0x72, - 0x74, 0x12, 0x32, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, - 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x05, - 0x72, 0x61, 0x6e, 0x67, 0x65, 0x1a, 0x2f, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, - 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x73, - 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x42, 0x0f, 0x0a, 0x0d, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x65, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x87, 0x03, 0x0a, 0x11, 0x52, 0x6f, 0x75, 0x74, - 0x65, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x22, 0x0a, - 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x73, 0x12, 0x2e, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, - 0x75, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, - 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x30, 0x0a, 0x08, 0x70, 0x6f, 0x72, - 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, - 0x6f, 0x52, 0x08, 0x70, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1c, 0x0a, 0x09, 0x69, - 0x73, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, - 0x69, 0x73, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x6f, 0x6d, - 0x61, 0x69, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x64, 0x6f, 0x6d, 0x61, - 0x69, 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x63, 0x75, 0x73, - 0x74, 0x6f, 0x6d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x44, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x52, 0x6f, 0x75, 0x74, 0x65, - 0x49, 0x44, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x49, - 0x44, 0x22, 0xf2, 0x01, 0x0a, 0x0e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, - 0x52, 0x75, 0x6c, 0x65, 0x12, 0x34, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, - 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x3e, 0x0a, 0x0f, 0x64, 0x65, - 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, + 0x12, 0x38, 0x0a, 0x0b, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, 0x65, 0x52, 0x0b, 0x43, + 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0d, 0x46, 0x6f, + 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x03, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0d, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, + 0x50, 0x6f, 0x72, 0x74, 0x22, 0xb8, 0x01, 0x0a, 0x0a, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, + 0x6f, 0x6e, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x32, 0x0a, 0x07, 0x52, + 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, + 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x07, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, + 0x32, 0x0a, 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x44, + 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x53, + 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x44, 0x69, 0x73, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x12, 0x2a, 0x0a, 0x10, 0x4e, 0x6f, 0x6e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x4e, + 0x6f, 0x6e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, 0x22, + 0x74, 0x0a, 0x0c, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, + 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x43, 0x6c, 0x61, 0x73, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x10, 0x0a, + 0x03, 0x54, 0x54, 0x4c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x54, 0x54, 0x4c, 0x12, + 0x14, 0x0a, 0x05, 0x52, 0x44, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x52, 0x44, 0x61, 0x74, 0x61, 0x22, 0xb3, 0x01, 0x0a, 0x0f, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x38, 0x0a, 0x0b, 0x4e, 0x61, 0x6d, + 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4e, 0x61, 0x6d, 0x65, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x18, 0x0a, + 0x07, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, + 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, + 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, + 0x61, 0x69, 0x6e, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x48, 0x0a, 0x0a, 0x4e, + 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x50, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x50, 0x12, 0x16, 0x0a, 0x06, 0x4e, 0x53, 0x54, + 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x4e, 0x53, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x04, 0x50, 0x6f, 0x72, 0x74, 0x22, 0xa7, 0x02, 0x0a, 0x0c, 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, + 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x50, 0x65, 0x65, 0x72, 0x49, 0x50, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x65, 0x65, 0x72, 0x49, 0x50, 0x12, 0x37, + 0x0a, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x19, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, + 0x75, 0x6c, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x44, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x12, 0x0a, + 0x04, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x50, 0x6f, 0x72, + 0x74, 0x12, 0x30, 0x0a, 0x08, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, - 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x2c, 0x0a, 0x11, 0x74, 0x72, - 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, - 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x3c, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, - 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, - 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, - 0x65, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x2a, 0x4c, 0x0a, 0x0c, 0x52, 0x75, 0x6c, 0x65, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, - 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, - 0x54, 0x43, 0x50, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x03, 0x12, 0x08, - 0x0a, 0x04, 0x49, 0x43, 0x4d, 0x50, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x55, 0x53, 0x54, - 0x4f, 0x4d, 0x10, 0x05, 0x2a, 0x20, 0x0a, 0x0d, 0x52, 0x75, 0x6c, 0x65, 0x44, 0x69, 0x72, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x06, 0x0a, 0x02, 0x49, 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, - 0x03, 0x4f, 0x55, 0x54, 0x10, 0x01, 0x2a, 0x22, 0x0a, 0x0a, 0x52, 0x75, 0x6c, 0x65, 0x41, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x43, 0x45, 0x50, 0x54, 0x10, 0x00, - 0x12, 0x08, 0x0a, 0x04, 0x44, 0x52, 0x4f, 0x50, 0x10, 0x01, 0x32, 0xcd, 0x04, 0x0a, 0x11, 0x4d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x12, 0x45, 0x0a, 0x05, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x46, 0x0a, 0x04, 0x53, 0x79, 0x6e, 0x63, 0x12, - 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, - 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, - 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, - 0x42, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x12, - 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x1a, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, - 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x33, 0x0a, 0x09, 0x69, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, - 0x12, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, - 0x70, 0x74, 0x79, 0x1a, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x44, - 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x50, 0x4b, 0x43, 0x45, 0x41, - 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, + 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x50, 0x6f, 0x72, 0x74, 0x49, + 0x6e, 0x66, 0x6f, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x44, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x44, 0x22, + 0x38, 0x0a, 0x0e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x65, 0x74, 0x49, 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x6e, 0x65, 0x74, 0x49, 0x50, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x61, 0x63, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6d, 0x61, 0x63, 0x22, 0x1e, 0x0a, 0x06, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x05, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x22, 0x96, 0x01, 0x0a, 0x08, 0x50, 0x6f, + 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x14, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x32, 0x0a, 0x05, + 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, + 0x6f, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, + 0x1a, 0x2f, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, + 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x65, 0x6e, + 0x64, 0x42, 0x0f, 0x0a, 0x0d, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0x87, 0x03, 0x0a, 0x11, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x46, 0x69, 0x72, 0x65, + 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x2e, 0x0a, 0x06, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, + 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x34, + 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, + 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x30, 0x0a, 0x08, 0x70, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x70, 0x6f, + 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1c, 0x0a, 0x09, 0x69, 0x73, 0x44, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x44, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, + 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x26, + 0x0a, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x49, 0x44, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x49, 0x44, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x49, 0x44, 0x22, 0xf2, 0x01, 0x0a, + 0x0e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x12, + 0x34, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, + 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x3e, 0x0a, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, + 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x2c, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, + 0x74, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x11, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x12, 0x3c, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, + 0x64, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6f, 0x72, + 0x74, 0x2a, 0x3a, 0x0a, 0x09, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, + 0x0a, 0x0e, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x73, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x10, + 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x10, 0x02, 0x2a, 0x4c, 0x0a, + 0x0c, 0x52, 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x0b, 0x0a, + 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4c, + 0x4c, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, + 0x55, 0x44, 0x50, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x43, 0x4d, 0x50, 0x10, 0x04, 0x12, + 0x0a, 0x0a, 0x06, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x10, 0x05, 0x2a, 0x20, 0x0a, 0x0d, 0x52, + 0x75, 0x6c, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x06, 0x0a, 0x02, + 0x49, 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x4f, 0x55, 0x54, 0x10, 0x01, 0x2a, 0x22, 0x0a, + 0x0a, 0x52, 0x75, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x41, + 0x43, 0x43, 0x45, 0x50, 0x54, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x52, 0x4f, 0x50, 0x10, + 0x01, 0x32, 0x96, 0x05, 0x0a, 0x11, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x45, 0x0a, 0x05, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x3d, - 0x0a, 0x08, 0x53, 0x79, 0x6e, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, - 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x3b, 0x0a, - 0x06, 0x4c, 0x6f, 0x67, 0x6f, 0x75, 0x74, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x46, + 0x0a, 0x04, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x42, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x33, 0x0a, 0x09, 0x69, 0x73, + 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x12, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x11, 0x2e, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, + 0x5a, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x1c, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x18, 0x47, + 0x65, 0x74, 0x50, 0x4b, 0x43, 0x45, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x42, 0x08, 0x5a, 0x06, 0x2f, 0x70, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x08, 0x53, 0x79, 0x6e, 0x63, 0x4d, 0x65, 0x74, + 0x61, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, + 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, + 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x6f, 0x75, 0x74, 0x12, 0x1c, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x11, 0x2e, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, + 0x00, 0x12, 0x47, 0x0a, 0x03, 0x4a, 0x6f, 0x62, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x08, 0x5a, 0x06, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } @@ -4314,142 +4686,152 @@ func file_management_proto_rawDescGZIP() []byte { return file_management_proto_rawDescData } -var file_management_proto_enumTypes = make([]protoimpl.EnumInfo, 5) -var file_management_proto_msgTypes = make([]protoimpl.MessageInfo, 45) +var file_management_proto_enumTypes = make([]protoimpl.EnumInfo, 6) +var file_management_proto_msgTypes = make([]protoimpl.MessageInfo, 49) var file_management_proto_goTypes = []interface{}{ - (RuleProtocol)(0), // 0: management.RuleProtocol - (RuleDirection)(0), // 1: management.RuleDirection - (RuleAction)(0), // 2: management.RuleAction - (HostConfig_Protocol)(0), // 3: management.HostConfig.Protocol - (DeviceAuthorizationFlowProvider)(0), // 4: management.DeviceAuthorizationFlow.provider - (*EncryptedMessage)(nil), // 5: management.EncryptedMessage - (*SyncRequest)(nil), // 6: management.SyncRequest - (*SyncResponse)(nil), // 7: management.SyncResponse - (*SyncMetaRequest)(nil), // 8: management.SyncMetaRequest - (*LoginRequest)(nil), // 9: management.LoginRequest - (*PeerKeys)(nil), // 10: management.PeerKeys - (*Environment)(nil), // 11: management.Environment - (*File)(nil), // 12: management.File - (*Flags)(nil), // 13: management.Flags - (*PeerSystemMeta)(nil), // 14: management.PeerSystemMeta - (*LoginResponse)(nil), // 15: management.LoginResponse - (*ServerKeyResponse)(nil), // 16: management.ServerKeyResponse - (*Empty)(nil), // 17: management.Empty - (*NetbirdConfig)(nil), // 18: management.NetbirdConfig - (*HostConfig)(nil), // 19: management.HostConfig - (*RelayConfig)(nil), // 20: management.RelayConfig - (*FlowConfig)(nil), // 21: management.FlowConfig - (*JWTConfig)(nil), // 22: management.JWTConfig - (*ProtectedHostConfig)(nil), // 23: management.ProtectedHostConfig - (*PeerConfig)(nil), // 24: management.PeerConfig - (*AutoUpdateSettings)(nil), // 25: management.AutoUpdateSettings - (*NetworkMap)(nil), // 26: management.NetworkMap - (*SSHAuth)(nil), // 27: management.SSHAuth - (*MachineUserIndexes)(nil), // 28: management.MachineUserIndexes - (*RemotePeerConfig)(nil), // 29: management.RemotePeerConfig - (*SSHConfig)(nil), // 30: management.SSHConfig - (*DeviceAuthorizationFlowRequest)(nil), // 31: management.DeviceAuthorizationFlowRequest - (*DeviceAuthorizationFlow)(nil), // 32: management.DeviceAuthorizationFlow - (*PKCEAuthorizationFlowRequest)(nil), // 33: management.PKCEAuthorizationFlowRequest - (*PKCEAuthorizationFlow)(nil), // 34: management.PKCEAuthorizationFlow - (*ProviderConfig)(nil), // 35: management.ProviderConfig - (*Route)(nil), // 36: management.Route - (*DNSConfig)(nil), // 37: management.DNSConfig - (*CustomZone)(nil), // 38: management.CustomZone - (*SimpleRecord)(nil), // 39: management.SimpleRecord - (*NameServerGroup)(nil), // 40: management.NameServerGroup - (*NameServer)(nil), // 41: management.NameServer - (*FirewallRule)(nil), // 42: management.FirewallRule - (*NetworkAddress)(nil), // 43: management.NetworkAddress - (*Checks)(nil), // 44: management.Checks - (*PortInfo)(nil), // 45: management.PortInfo - (*RouteFirewallRule)(nil), // 46: management.RouteFirewallRule - (*ForwardingRule)(nil), // 47: management.ForwardingRule - nil, // 48: management.SSHAuth.MachineUsersEntry - (*PortInfo_Range)(nil), // 49: management.PortInfo.Range - (*timestamppb.Timestamp)(nil), // 50: google.protobuf.Timestamp - (*durationpb.Duration)(nil), // 51: google.protobuf.Duration + (JobStatus)(0), // 0: management.JobStatus + (RuleProtocol)(0), // 1: management.RuleProtocol + (RuleDirection)(0), // 2: management.RuleDirection + (RuleAction)(0), // 3: management.RuleAction + (HostConfig_Protocol)(0), // 4: management.HostConfig.Protocol + (DeviceAuthorizationFlowProvider)(0), // 5: management.DeviceAuthorizationFlow.provider + (*EncryptedMessage)(nil), // 6: management.EncryptedMessage + (*JobRequest)(nil), // 7: management.JobRequest + (*JobResponse)(nil), // 8: management.JobResponse + (*BundleParameters)(nil), // 9: management.BundleParameters + (*BundleResult)(nil), // 10: management.BundleResult + (*SyncRequest)(nil), // 11: management.SyncRequest + (*SyncResponse)(nil), // 12: management.SyncResponse + (*SyncMetaRequest)(nil), // 13: management.SyncMetaRequest + (*LoginRequest)(nil), // 14: management.LoginRequest + (*PeerKeys)(nil), // 15: management.PeerKeys + (*Environment)(nil), // 16: management.Environment + (*File)(nil), // 17: management.File + (*Flags)(nil), // 18: management.Flags + (*PeerSystemMeta)(nil), // 19: management.PeerSystemMeta + (*LoginResponse)(nil), // 20: management.LoginResponse + (*ServerKeyResponse)(nil), // 21: management.ServerKeyResponse + (*Empty)(nil), // 22: management.Empty + (*NetbirdConfig)(nil), // 23: management.NetbirdConfig + (*HostConfig)(nil), // 24: management.HostConfig + (*RelayConfig)(nil), // 25: management.RelayConfig + (*FlowConfig)(nil), // 26: management.FlowConfig + (*JWTConfig)(nil), // 27: management.JWTConfig + (*ProtectedHostConfig)(nil), // 28: management.ProtectedHostConfig + (*PeerConfig)(nil), // 29: management.PeerConfig + (*AutoUpdateSettings)(nil), // 30: management.AutoUpdateSettings + (*NetworkMap)(nil), // 31: management.NetworkMap + (*SSHAuth)(nil), // 32: management.SSHAuth + (*MachineUserIndexes)(nil), // 33: management.MachineUserIndexes + (*RemotePeerConfig)(nil), // 34: management.RemotePeerConfig + (*SSHConfig)(nil), // 35: management.SSHConfig + (*DeviceAuthorizationFlowRequest)(nil), // 36: management.DeviceAuthorizationFlowRequest + (*DeviceAuthorizationFlow)(nil), // 37: management.DeviceAuthorizationFlow + (*PKCEAuthorizationFlowRequest)(nil), // 38: management.PKCEAuthorizationFlowRequest + (*PKCEAuthorizationFlow)(nil), // 39: management.PKCEAuthorizationFlow + (*ProviderConfig)(nil), // 40: management.ProviderConfig + (*Route)(nil), // 41: management.Route + (*DNSConfig)(nil), // 42: management.DNSConfig + (*CustomZone)(nil), // 43: management.CustomZone + (*SimpleRecord)(nil), // 44: management.SimpleRecord + (*NameServerGroup)(nil), // 45: management.NameServerGroup + (*NameServer)(nil), // 46: management.NameServer + (*FirewallRule)(nil), // 47: management.FirewallRule + (*NetworkAddress)(nil), // 48: management.NetworkAddress + (*Checks)(nil), // 49: management.Checks + (*PortInfo)(nil), // 50: management.PortInfo + (*RouteFirewallRule)(nil), // 51: management.RouteFirewallRule + (*ForwardingRule)(nil), // 52: management.ForwardingRule + nil, // 53: management.SSHAuth.MachineUsersEntry + (*PortInfo_Range)(nil), // 54: management.PortInfo.Range + (*timestamppb.Timestamp)(nil), // 55: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 56: google.protobuf.Duration } var file_management_proto_depIdxs = []int32{ - 14, // 0: management.SyncRequest.meta:type_name -> management.PeerSystemMeta - 18, // 1: management.SyncResponse.netbirdConfig:type_name -> management.NetbirdConfig - 24, // 2: management.SyncResponse.peerConfig:type_name -> management.PeerConfig - 29, // 3: management.SyncResponse.remotePeers:type_name -> management.RemotePeerConfig - 26, // 4: management.SyncResponse.NetworkMap:type_name -> management.NetworkMap - 44, // 5: management.SyncResponse.Checks:type_name -> management.Checks - 14, // 6: management.SyncMetaRequest.meta:type_name -> management.PeerSystemMeta - 14, // 7: management.LoginRequest.meta:type_name -> management.PeerSystemMeta - 10, // 8: management.LoginRequest.peerKeys:type_name -> management.PeerKeys - 43, // 9: management.PeerSystemMeta.networkAddresses:type_name -> management.NetworkAddress - 11, // 10: management.PeerSystemMeta.environment:type_name -> management.Environment - 12, // 11: management.PeerSystemMeta.files:type_name -> management.File - 13, // 12: management.PeerSystemMeta.flags:type_name -> management.Flags - 18, // 13: management.LoginResponse.netbirdConfig:type_name -> management.NetbirdConfig - 24, // 14: management.LoginResponse.peerConfig:type_name -> management.PeerConfig - 44, // 15: management.LoginResponse.Checks:type_name -> management.Checks - 50, // 16: management.ServerKeyResponse.expiresAt:type_name -> google.protobuf.Timestamp - 19, // 17: management.NetbirdConfig.stuns:type_name -> management.HostConfig - 23, // 18: management.NetbirdConfig.turns:type_name -> management.ProtectedHostConfig - 19, // 19: management.NetbirdConfig.signal:type_name -> management.HostConfig - 20, // 20: management.NetbirdConfig.relay:type_name -> management.RelayConfig - 21, // 21: management.NetbirdConfig.flow:type_name -> management.FlowConfig - 3, // 22: management.HostConfig.protocol:type_name -> management.HostConfig.Protocol - 51, // 23: management.FlowConfig.interval:type_name -> google.protobuf.Duration - 19, // 24: management.ProtectedHostConfig.hostConfig:type_name -> management.HostConfig - 30, // 25: management.PeerConfig.sshConfig:type_name -> management.SSHConfig - 25, // 26: management.PeerConfig.autoUpdate:type_name -> management.AutoUpdateSettings - 24, // 27: management.NetworkMap.peerConfig:type_name -> management.PeerConfig - 29, // 28: management.NetworkMap.remotePeers:type_name -> management.RemotePeerConfig - 36, // 29: management.NetworkMap.Routes:type_name -> management.Route - 37, // 30: management.NetworkMap.DNSConfig:type_name -> management.DNSConfig - 29, // 31: management.NetworkMap.offlinePeers:type_name -> management.RemotePeerConfig - 42, // 32: management.NetworkMap.FirewallRules:type_name -> management.FirewallRule - 46, // 33: management.NetworkMap.routesFirewallRules:type_name -> management.RouteFirewallRule - 47, // 34: management.NetworkMap.forwardingRules:type_name -> management.ForwardingRule - 27, // 35: management.NetworkMap.sshAuth:type_name -> management.SSHAuth - 48, // 36: management.SSHAuth.machine_users:type_name -> management.SSHAuth.MachineUsersEntry - 30, // 37: management.RemotePeerConfig.sshConfig:type_name -> management.SSHConfig - 22, // 38: management.SSHConfig.jwtConfig:type_name -> management.JWTConfig - 4, // 39: management.DeviceAuthorizationFlow.Provider:type_name -> management.DeviceAuthorizationFlow.provider - 35, // 40: management.DeviceAuthorizationFlow.ProviderConfig:type_name -> management.ProviderConfig - 35, // 41: management.PKCEAuthorizationFlow.ProviderConfig:type_name -> management.ProviderConfig - 40, // 42: management.DNSConfig.NameServerGroups:type_name -> management.NameServerGroup - 38, // 43: management.DNSConfig.CustomZones:type_name -> management.CustomZone - 39, // 44: management.CustomZone.Records:type_name -> management.SimpleRecord - 41, // 45: management.NameServerGroup.NameServers:type_name -> management.NameServer - 1, // 46: management.FirewallRule.Direction:type_name -> management.RuleDirection - 2, // 47: management.FirewallRule.Action:type_name -> management.RuleAction - 0, // 48: management.FirewallRule.Protocol:type_name -> management.RuleProtocol - 45, // 49: management.FirewallRule.PortInfo:type_name -> management.PortInfo - 49, // 50: management.PortInfo.range:type_name -> management.PortInfo.Range - 2, // 51: management.RouteFirewallRule.action:type_name -> management.RuleAction - 0, // 52: management.RouteFirewallRule.protocol:type_name -> management.RuleProtocol - 45, // 53: management.RouteFirewallRule.portInfo:type_name -> management.PortInfo - 0, // 54: management.ForwardingRule.protocol:type_name -> management.RuleProtocol - 45, // 55: management.ForwardingRule.destinationPort:type_name -> management.PortInfo - 45, // 56: management.ForwardingRule.translatedPort:type_name -> management.PortInfo - 28, // 57: management.SSHAuth.MachineUsersEntry.value:type_name -> management.MachineUserIndexes - 5, // 58: management.ManagementService.Login:input_type -> management.EncryptedMessage - 5, // 59: management.ManagementService.Sync:input_type -> management.EncryptedMessage - 17, // 60: management.ManagementService.GetServerKey:input_type -> management.Empty - 17, // 61: management.ManagementService.isHealthy:input_type -> management.Empty - 5, // 62: management.ManagementService.GetDeviceAuthorizationFlow:input_type -> management.EncryptedMessage - 5, // 63: management.ManagementService.GetPKCEAuthorizationFlow:input_type -> management.EncryptedMessage - 5, // 64: management.ManagementService.SyncMeta:input_type -> management.EncryptedMessage - 5, // 65: management.ManagementService.Logout:input_type -> management.EncryptedMessage - 5, // 66: management.ManagementService.Login:output_type -> management.EncryptedMessage - 5, // 67: management.ManagementService.Sync:output_type -> management.EncryptedMessage - 16, // 68: management.ManagementService.GetServerKey:output_type -> management.ServerKeyResponse - 17, // 69: management.ManagementService.isHealthy:output_type -> management.Empty - 5, // 70: management.ManagementService.GetDeviceAuthorizationFlow:output_type -> management.EncryptedMessage - 5, // 71: management.ManagementService.GetPKCEAuthorizationFlow:output_type -> management.EncryptedMessage - 17, // 72: management.ManagementService.SyncMeta:output_type -> management.Empty - 17, // 73: management.ManagementService.Logout:output_type -> management.Empty - 66, // [66:74] is the sub-list for method output_type - 58, // [58:66] is the sub-list for method input_type - 58, // [58:58] is the sub-list for extension type_name - 58, // [58:58] is the sub-list for extension extendee - 0, // [0:58] is the sub-list for field type_name + 9, // 0: management.JobRequest.bundle:type_name -> management.BundleParameters + 0, // 1: management.JobResponse.status:type_name -> management.JobStatus + 10, // 2: management.JobResponse.bundle:type_name -> management.BundleResult + 19, // 3: management.SyncRequest.meta:type_name -> management.PeerSystemMeta + 23, // 4: management.SyncResponse.netbirdConfig:type_name -> management.NetbirdConfig + 29, // 5: management.SyncResponse.peerConfig:type_name -> management.PeerConfig + 34, // 6: management.SyncResponse.remotePeers:type_name -> management.RemotePeerConfig + 31, // 7: management.SyncResponse.NetworkMap:type_name -> management.NetworkMap + 49, // 8: management.SyncResponse.Checks:type_name -> management.Checks + 19, // 9: management.SyncMetaRequest.meta:type_name -> management.PeerSystemMeta + 19, // 10: management.LoginRequest.meta:type_name -> management.PeerSystemMeta + 15, // 11: management.LoginRequest.peerKeys:type_name -> management.PeerKeys + 48, // 12: management.PeerSystemMeta.networkAddresses:type_name -> management.NetworkAddress + 16, // 13: management.PeerSystemMeta.environment:type_name -> management.Environment + 17, // 14: management.PeerSystemMeta.files:type_name -> management.File + 18, // 15: management.PeerSystemMeta.flags:type_name -> management.Flags + 23, // 16: management.LoginResponse.netbirdConfig:type_name -> management.NetbirdConfig + 29, // 17: management.LoginResponse.peerConfig:type_name -> management.PeerConfig + 49, // 18: management.LoginResponse.Checks:type_name -> management.Checks + 55, // 19: management.ServerKeyResponse.expiresAt:type_name -> google.protobuf.Timestamp + 24, // 20: management.NetbirdConfig.stuns:type_name -> management.HostConfig + 28, // 21: management.NetbirdConfig.turns:type_name -> management.ProtectedHostConfig + 24, // 22: management.NetbirdConfig.signal:type_name -> management.HostConfig + 25, // 23: management.NetbirdConfig.relay:type_name -> management.RelayConfig + 26, // 24: management.NetbirdConfig.flow:type_name -> management.FlowConfig + 4, // 25: management.HostConfig.protocol:type_name -> management.HostConfig.Protocol + 56, // 26: management.FlowConfig.interval:type_name -> google.protobuf.Duration + 24, // 27: management.ProtectedHostConfig.hostConfig:type_name -> management.HostConfig + 35, // 28: management.PeerConfig.sshConfig:type_name -> management.SSHConfig + 30, // 29: management.PeerConfig.autoUpdate:type_name -> management.AutoUpdateSettings + 29, // 30: management.NetworkMap.peerConfig:type_name -> management.PeerConfig + 34, // 31: management.NetworkMap.remotePeers:type_name -> management.RemotePeerConfig + 41, // 32: management.NetworkMap.Routes:type_name -> management.Route + 42, // 33: management.NetworkMap.DNSConfig:type_name -> management.DNSConfig + 34, // 34: management.NetworkMap.offlinePeers:type_name -> management.RemotePeerConfig + 47, // 35: management.NetworkMap.FirewallRules:type_name -> management.FirewallRule + 51, // 36: management.NetworkMap.routesFirewallRules:type_name -> management.RouteFirewallRule + 52, // 37: management.NetworkMap.forwardingRules:type_name -> management.ForwardingRule + 32, // 38: management.NetworkMap.sshAuth:type_name -> management.SSHAuth + 53, // 39: management.SSHAuth.machine_users:type_name -> management.SSHAuth.MachineUsersEntry + 35, // 40: management.RemotePeerConfig.sshConfig:type_name -> management.SSHConfig + 27, // 41: management.SSHConfig.jwtConfig:type_name -> management.JWTConfig + 5, // 42: management.DeviceAuthorizationFlow.Provider:type_name -> management.DeviceAuthorizationFlow.provider + 40, // 43: management.DeviceAuthorizationFlow.ProviderConfig:type_name -> management.ProviderConfig + 40, // 44: management.PKCEAuthorizationFlow.ProviderConfig:type_name -> management.ProviderConfig + 45, // 45: management.DNSConfig.NameServerGroups:type_name -> management.NameServerGroup + 43, // 46: management.DNSConfig.CustomZones:type_name -> management.CustomZone + 44, // 47: management.CustomZone.Records:type_name -> management.SimpleRecord + 46, // 48: management.NameServerGroup.NameServers:type_name -> management.NameServer + 2, // 49: management.FirewallRule.Direction:type_name -> management.RuleDirection + 3, // 50: management.FirewallRule.Action:type_name -> management.RuleAction + 1, // 51: management.FirewallRule.Protocol:type_name -> management.RuleProtocol + 50, // 52: management.FirewallRule.PortInfo:type_name -> management.PortInfo + 54, // 53: management.PortInfo.range:type_name -> management.PortInfo.Range + 3, // 54: management.RouteFirewallRule.action:type_name -> management.RuleAction + 1, // 55: management.RouteFirewallRule.protocol:type_name -> management.RuleProtocol + 50, // 56: management.RouteFirewallRule.portInfo:type_name -> management.PortInfo + 1, // 57: management.ForwardingRule.protocol:type_name -> management.RuleProtocol + 50, // 58: management.ForwardingRule.destinationPort:type_name -> management.PortInfo + 50, // 59: management.ForwardingRule.translatedPort:type_name -> management.PortInfo + 33, // 60: management.SSHAuth.MachineUsersEntry.value:type_name -> management.MachineUserIndexes + 6, // 61: management.ManagementService.Login:input_type -> management.EncryptedMessage + 6, // 62: management.ManagementService.Sync:input_type -> management.EncryptedMessage + 22, // 63: management.ManagementService.GetServerKey:input_type -> management.Empty + 22, // 64: management.ManagementService.isHealthy:input_type -> management.Empty + 6, // 65: management.ManagementService.GetDeviceAuthorizationFlow:input_type -> management.EncryptedMessage + 6, // 66: management.ManagementService.GetPKCEAuthorizationFlow:input_type -> management.EncryptedMessage + 6, // 67: management.ManagementService.SyncMeta:input_type -> management.EncryptedMessage + 6, // 68: management.ManagementService.Logout:input_type -> management.EncryptedMessage + 6, // 69: management.ManagementService.Job:input_type -> management.EncryptedMessage + 6, // 70: management.ManagementService.Login:output_type -> management.EncryptedMessage + 6, // 71: management.ManagementService.Sync:output_type -> management.EncryptedMessage + 21, // 72: management.ManagementService.GetServerKey:output_type -> management.ServerKeyResponse + 22, // 73: management.ManagementService.isHealthy:output_type -> management.Empty + 6, // 74: management.ManagementService.GetDeviceAuthorizationFlow:output_type -> management.EncryptedMessage + 6, // 75: management.ManagementService.GetPKCEAuthorizationFlow:output_type -> management.EncryptedMessage + 22, // 76: management.ManagementService.SyncMeta:output_type -> management.Empty + 22, // 77: management.ManagementService.Logout:output_type -> management.Empty + 6, // 78: management.ManagementService.Job:output_type -> management.EncryptedMessage + 70, // [70:79] is the sub-list for method output_type + 61, // [61:70] is the sub-list for method input_type + 61, // [61:61] is the sub-list for extension type_name + 61, // [61:61] is the sub-list for extension extendee + 0, // [0:61] is the sub-list for field type_name } func init() { file_management_proto_init() } @@ -4471,7 +4853,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SyncRequest); i { + switch v := v.(*JobRequest); i { case 0: return &v.state case 1: @@ -4483,7 +4865,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SyncResponse); i { + switch v := v.(*JobResponse); i { case 0: return &v.state case 1: @@ -4495,7 +4877,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SyncMetaRequest); i { + switch v := v.(*BundleParameters); i { case 0: return &v.state case 1: @@ -4507,7 +4889,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LoginRequest); i { + switch v := v.(*BundleResult); i { case 0: return &v.state case 1: @@ -4519,7 +4901,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PeerKeys); i { + switch v := v.(*SyncRequest); i { case 0: return &v.state case 1: @@ -4531,7 +4913,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Environment); i { + switch v := v.(*SyncResponse); i { case 0: return &v.state case 1: @@ -4543,7 +4925,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*File); i { + switch v := v.(*SyncMetaRequest); i { case 0: return &v.state case 1: @@ -4555,7 +4937,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Flags); i { + switch v := v.(*LoginRequest); i { case 0: return &v.state case 1: @@ -4567,7 +4949,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PeerSystemMeta); i { + switch v := v.(*PeerKeys); i { case 0: return &v.state case 1: @@ -4579,7 +4961,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LoginResponse); i { + switch v := v.(*Environment); i { case 0: return &v.state case 1: @@ -4591,7 +4973,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServerKeyResponse); i { + switch v := v.(*File); i { case 0: return &v.state case 1: @@ -4603,7 +4985,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Empty); i { + switch v := v.(*Flags); i { case 0: return &v.state case 1: @@ -4615,7 +4997,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NetbirdConfig); i { + switch v := v.(*PeerSystemMeta); i { case 0: return &v.state case 1: @@ -4627,7 +5009,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HostConfig); i { + switch v := v.(*LoginResponse); i { case 0: return &v.state case 1: @@ -4639,7 +5021,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RelayConfig); i { + switch v := v.(*ServerKeyResponse); i { case 0: return &v.state case 1: @@ -4651,7 +5033,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FlowConfig); i { + switch v := v.(*Empty); i { case 0: return &v.state case 1: @@ -4663,7 +5045,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*JWTConfig); i { + switch v := v.(*NetbirdConfig); i { case 0: return &v.state case 1: @@ -4675,7 +5057,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProtectedHostConfig); i { + switch v := v.(*HostConfig); i { case 0: return &v.state case 1: @@ -4687,7 +5069,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PeerConfig); i { + switch v := v.(*RelayConfig); i { case 0: return &v.state case 1: @@ -4699,7 +5081,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AutoUpdateSettings); i { + switch v := v.(*FlowConfig); i { case 0: return &v.state case 1: @@ -4711,7 +5093,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NetworkMap); i { + switch v := v.(*JWTConfig); i { case 0: return &v.state case 1: @@ -4723,7 +5105,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SSHAuth); i { + switch v := v.(*ProtectedHostConfig); i { case 0: return &v.state case 1: @@ -4735,7 +5117,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MachineUserIndexes); i { + switch v := v.(*PeerConfig); i { case 0: return &v.state case 1: @@ -4747,7 +5129,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemotePeerConfig); i { + switch v := v.(*AutoUpdateSettings); i { case 0: return &v.state case 1: @@ -4759,7 +5141,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SSHConfig); i { + switch v := v.(*NetworkMap); i { case 0: return &v.state case 1: @@ -4771,7 +5153,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeviceAuthorizationFlowRequest); i { + switch v := v.(*SSHAuth); i { case 0: return &v.state case 1: @@ -4783,7 +5165,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeviceAuthorizationFlow); i { + switch v := v.(*MachineUserIndexes); i { case 0: return &v.state case 1: @@ -4795,7 +5177,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PKCEAuthorizationFlowRequest); i { + switch v := v.(*RemotePeerConfig); i { case 0: return &v.state case 1: @@ -4807,7 +5189,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PKCEAuthorizationFlow); i { + switch v := v.(*SSHConfig); i { case 0: return &v.state case 1: @@ -4819,7 +5201,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProviderConfig); i { + switch v := v.(*DeviceAuthorizationFlowRequest); i { case 0: return &v.state case 1: @@ -4831,7 +5213,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Route); i { + switch v := v.(*DeviceAuthorizationFlow); i { case 0: return &v.state case 1: @@ -4843,7 +5225,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DNSConfig); i { + switch v := v.(*PKCEAuthorizationFlowRequest); i { case 0: return &v.state case 1: @@ -4855,7 +5237,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CustomZone); i { + switch v := v.(*PKCEAuthorizationFlow); i { case 0: return &v.state case 1: @@ -4867,7 +5249,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SimpleRecord); i { + switch v := v.(*ProviderConfig); i { case 0: return &v.state case 1: @@ -4879,7 +5261,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NameServerGroup); i { + switch v := v.(*Route); i { case 0: return &v.state case 1: @@ -4891,7 +5273,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NameServer); i { + switch v := v.(*DNSConfig); i { case 0: return &v.state case 1: @@ -4903,7 +5285,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FirewallRule); i { + switch v := v.(*CustomZone); i { case 0: return &v.state case 1: @@ -4915,7 +5297,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NetworkAddress); i { + switch v := v.(*SimpleRecord); i { case 0: return &v.state case 1: @@ -4927,7 +5309,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Checks); i { + switch v := v.(*NameServerGroup); i { case 0: return &v.state case 1: @@ -4939,7 +5321,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PortInfo); i { + switch v := v.(*NameServer); i { case 0: return &v.state case 1: @@ -4951,7 +5333,7 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RouteFirewallRule); i { + switch v := v.(*FirewallRule); i { case 0: return &v.state case 1: @@ -4963,7 +5345,19 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ForwardingRule); i { + switch v := v.(*NetworkAddress); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_management_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Checks); i { case 0: return &v.state case 1: @@ -4975,6 +5369,42 @@ func file_management_proto_init() { } } file_management_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PortInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_management_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RouteFirewallRule); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_management_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ForwardingRule); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_management_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PortInfo_Range); i { case 0: return &v.state @@ -4987,7 +5417,13 @@ func file_management_proto_init() { } } } - file_management_proto_msgTypes[40].OneofWrappers = []interface{}{ + file_management_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*JobRequest_Bundle)(nil), + } + file_management_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*JobResponse_Bundle)(nil), + } + file_management_proto_msgTypes[44].OneofWrappers = []interface{}{ (*PortInfo_Port)(nil), (*PortInfo_Range_)(nil), } @@ -4996,8 +5432,8 @@ func file_management_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_management_proto_rawDesc, - NumEnums: 5, - NumMessages: 45, + NumEnums: 6, + NumMessages: 49, NumExtensions: 0, NumServices: 1, }, diff --git a/shared/management/proto/management.proto b/shared/management/proto/management.proto index e44b49781..d97d66819 100644 --- a/shared/management/proto/management.proto +++ b/shared/management/proto/management.proto @@ -48,6 +48,9 @@ service ManagementService { // Logout logs out the peer and removes it from the management server rpc Logout(EncryptedMessage) returns (Empty) {} + + // Executes a job on a target peer (e.g., debug bundle) + rpc Job(stream EncryptedMessage) returns (stream EncryptedMessage) {} } message EncryptedMessage { @@ -60,6 +63,42 @@ message EncryptedMessage { int32 version = 3; } +message JobRequest { + bytes ID = 1; + + oneof workload_parameters { + BundleParameters bundle = 10; + //OtherParameters other = 11; + } +} + +enum JobStatus { + unknown_status = 0; //placeholder + succeeded = 1; + failed = 2; +} + +message JobResponse{ + bytes ID = 1; + JobStatus status=2; + bytes Reason=3; + oneof workload_results { + BundleResult bundle = 10; + //OtherResult other = 11; + } +} + +message BundleParameters { + bool bundle_for = 1; + int64 bundle_for_time = 2; + int32 log_file_count = 3; + bool anonymize = 4; +} + +message BundleResult { + string upload_key = 1; +} + message SyncRequest { // Meta data of the peer PeerSystemMeta meta = 1; diff --git a/shared/management/proto/management_grpc.pb.go b/shared/management/proto/management_grpc.pb.go index 5b189334d..b78e21aaa 100644 --- a/shared/management/proto/management_grpc.pb.go +++ b/shared/management/proto/management_grpc.pb.go @@ -50,6 +50,8 @@ type ManagementServiceClient interface { SyncMeta(ctx context.Context, in *EncryptedMessage, opts ...grpc.CallOption) (*Empty, error) // Logout logs out the peer and removes it from the management server Logout(ctx context.Context, in *EncryptedMessage, opts ...grpc.CallOption) (*Empty, error) + // Executes a job on a target peer (e.g., debug bundle) + Job(ctx context.Context, opts ...grpc.CallOption) (ManagementService_JobClient, error) } type managementServiceClient struct { @@ -155,6 +157,37 @@ func (c *managementServiceClient) Logout(ctx context.Context, in *EncryptedMessa return out, nil } +func (c *managementServiceClient) Job(ctx context.Context, opts ...grpc.CallOption) (ManagementService_JobClient, error) { + stream, err := c.cc.NewStream(ctx, &ManagementService_ServiceDesc.Streams[1], "/management.ManagementService/Job", opts...) + if err != nil { + return nil, err + } + x := &managementServiceJobClient{stream} + return x, nil +} + +type ManagementService_JobClient interface { + Send(*EncryptedMessage) error + Recv() (*EncryptedMessage, error) + grpc.ClientStream +} + +type managementServiceJobClient struct { + grpc.ClientStream +} + +func (x *managementServiceJobClient) Send(m *EncryptedMessage) error { + return x.ClientStream.SendMsg(m) +} + +func (x *managementServiceJobClient) Recv() (*EncryptedMessage, error) { + m := new(EncryptedMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + // ManagementServiceServer is the server API for ManagementService service. // All implementations must embed UnimplementedManagementServiceServer // for forward compatibility @@ -191,6 +224,8 @@ type ManagementServiceServer interface { SyncMeta(context.Context, *EncryptedMessage) (*Empty, error) // Logout logs out the peer and removes it from the management server Logout(context.Context, *EncryptedMessage) (*Empty, error) + // Executes a job on a target peer (e.g., debug bundle) + Job(ManagementService_JobServer) error mustEmbedUnimplementedManagementServiceServer() } @@ -222,6 +257,9 @@ func (UnimplementedManagementServiceServer) SyncMeta(context.Context, *Encrypted func (UnimplementedManagementServiceServer) Logout(context.Context, *EncryptedMessage) (*Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method Logout not implemented") } +func (UnimplementedManagementServiceServer) Job(ManagementService_JobServer) error { + return status.Errorf(codes.Unimplemented, "method Job not implemented") +} func (UnimplementedManagementServiceServer) mustEmbedUnimplementedManagementServiceServer() {} // UnsafeManagementServiceServer may be embedded to opt out of forward compatibility for this service. @@ -382,6 +420,32 @@ func _ManagementService_Logout_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +func _ManagementService_Job_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ManagementServiceServer).Job(&managementServiceJobServer{stream}) +} + +type ManagementService_JobServer interface { + Send(*EncryptedMessage) error + Recv() (*EncryptedMessage, error) + grpc.ServerStream +} + +type managementServiceJobServer struct { + grpc.ServerStream +} + +func (x *managementServiceJobServer) Send(m *EncryptedMessage) error { + return x.ServerStream.SendMsg(m) +} + +func (x *managementServiceJobServer) Recv() (*EncryptedMessage, error) { + m := new(EncryptedMessage) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + // ManagementService_ServiceDesc is the grpc.ServiceDesc for ManagementService service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -424,6 +488,12 @@ var ManagementService_ServiceDesc = grpc.ServiceDesc{ Handler: _ManagementService_Sync_Handler, ServerStreams: true, }, + { + StreamName: "Job", + Handler: _ManagementService_Job_Handler, + ServerStreams: true, + ClientStreams: true, + }, }, Metadata: "management.proto", } diff --git a/sharedsock/sock_linux.go b/sharedsock/sock_linux.go index bc2d4d1be..523beb32b 100644 --- a/sharedsock/sock_linux.go +++ b/sharedsock/sock_linux.go @@ -154,9 +154,20 @@ func (s *SharedSocket) updateRouter() { } } -// LocalAddr returns an IPv4 address using the supplied port +// LocalAddr returns the local address, preferring IPv4 for backward compatibility. func (s *SharedSocket) LocalAddr() net.Addr { - // todo check impact on ipv6 discovery + if s.conn4 != nil { + return &net.UDPAddr{ + IP: net.IPv4zero, + Port: s.port, + } + } + if s.conn6 != nil { + return &net.UDPAddr{ + IP: net.IPv6zero, + Port: s.port, + } + } return &net.UDPAddr{ IP: net.IPv4zero, Port: s.port, diff --git a/util/crypt/crypt_test.go b/util/crypt/crypt_test.go new file mode 100644 index 000000000..143a4bbc2 --- /dev/null +++ b/util/crypt/crypt_test.go @@ -0,0 +1,139 @@ +package crypt + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGenerateKey(t *testing.T) { + key, err := GenerateKey() + require.NoError(t, err) + assert.NotEmpty(t, key) + + _, err = NewFieldEncrypt(key) + assert.NoError(t, err) +} + +func TestNewFieldEncrypt_InvalidKey(t *testing.T) { + tests := []struct { + name string + key string + }{ + {name: "invalid base64", key: "not-valid-base64!!!"}, + {name: "too short", key: "c2hvcnQ="}, + {name: "empty", key: ""}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, err := NewFieldEncrypt(tt.key) + assert.Error(t, err) + }) + } +} + +func TestEncryptDecrypt(t *testing.T) { + key, err := GenerateKey() + require.NoError(t, err) + + ec, err := NewFieldEncrypt(key) + require.NoError(t, err) + + testCases := []struct { + name string + input string + }{ + {name: "Empty String", input: ""}, + {name: "Short String", input: "Hello"}, + {name: "String with Spaces", input: "Hello, World!"}, + {name: "Long String", input: "The quick brown fox jumps over the lazy dog."}, + {name: "Unicode Characters", input: "こんにちは世界"}, + {name: "Special Characters", input: "!@#$%^&*()_+-=[]{}|;':\",./<>?"}, + {name: "Numeric String", input: "1234567890"}, + {name: "Email Address", input: "user@example.com"}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + encrypted, err := ec.Encrypt(tc.input) + require.NoError(t, err) + + decrypted, err := ec.Decrypt(encrypted) + require.NoError(t, err) + + assert.Equal(t, tc.input, decrypted) + }) + } +} + +func TestEncrypt_DifferentCiphertexts(t *testing.T) { + key, err := GenerateKey() + require.NoError(t, err) + + ec, err := NewFieldEncrypt(key) + require.NoError(t, err) + + plaintext := "same plaintext" + + // Encrypt the same plaintext multiple times + encrypted1, err := ec.Encrypt(plaintext) + require.NoError(t, err) + + encrypted2, err := ec.Encrypt(plaintext) + require.NoError(t, err) + + assert.NotEqual(t, encrypted1, encrypted2, "expected different ciphertexts for same plaintext (random nonce)") + + // Both should decrypt to the same plaintext + decrypted1, err := ec.Decrypt(encrypted1) + require.NoError(t, err) + + decrypted2, err := ec.Decrypt(encrypted2) + require.NoError(t, err) + + assert.Equal(t, plaintext, decrypted1) + assert.Equal(t, plaintext, decrypted2) +} + +func TestDecrypt_InvalidCiphertext(t *testing.T) { + key, err := GenerateKey() + assert.NoError(t, err) + + ec, err := NewFieldEncrypt(key) + assert.NoError(t, err) + + tests := []struct { + name string + ciphertext string + }{ + {name: "invalid base64", ciphertext: "not-valid!!!"}, + {name: "too short", ciphertext: "c2hvcnQ="}, + {name: "corrupted", ciphertext: "YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXo="}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + payload, err := ec.Decrypt(tt.ciphertext) + assert.Error(t, err) + assert.Empty(t, payload) + }) + } +} + +func TestDecrypt_WrongKey(t *testing.T) { + key1, _ := GenerateKey() + key2, _ := GenerateKey() + + ec1, _ := NewFieldEncrypt(key1) + ec2, _ := NewFieldEncrypt(key2) + + plaintext := "secret data" + encrypted, _ := ec1.Encrypt(plaintext) + + // Try to decrypt with wrong key + payload, err := ec2.Decrypt(encrypted) + assert.Error(t, err) + assert.Empty(t, payload) +} diff --git a/util/crypt/legacy.go b/util/crypt/legacy.go new file mode 100644 index 000000000..f84e6964f --- /dev/null +++ b/util/crypt/legacy.go @@ -0,0 +1,71 @@ +package crypt + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "encoding/base64" + "fmt" +) + +// legacyIV is the static IV used by the legacy CBC encryption. +// Deprecated: This is kept only for backward compatibility with existing encrypted data. +var legacyIV = []byte{10, 22, 13, 79, 05, 8, 52, 91, 87, 98, 88, 98, 35, 25, 13, 05} + +// LegacyEncrypt encrypts plaintext using AES-CBC with a static IV. +// Deprecated: Use Encrypt instead. This method is kept only for backward compatibility. +func (f *FieldEncrypt) LegacyEncrypt(plaintext string) string { + padded := pkcs5Padding([]byte(plaintext)) + ciphertext := make([]byte, len(padded)) + cbc := cipher.NewCBCEncrypter(f.block, legacyIV) + cbc.CryptBlocks(ciphertext, padded) + return base64.StdEncoding.EncodeToString(ciphertext) +} + +// LegacyDecrypt decrypts ciphertext that was encrypted using AES-CBC with a static IV. +// Deprecated: This method is kept only for backward compatibility with existing encrypted data. +func (f *FieldEncrypt) LegacyDecrypt(ciphertext string) (string, error) { + data, err := base64.StdEncoding.DecodeString(ciphertext) + if err != nil { + return "", fmt.Errorf("decode ciphertext: %w", err) + } + + cbc := cipher.NewCBCDecrypter(f.block, legacyIV) + cbc.CryptBlocks(data, data) + + plaintext, err := pkcs5UnPadding(data) + if err != nil { + return "", fmt.Errorf("unpad plaintext: %w", err) + } + + return string(plaintext), nil +} + +// pkcs5Padding adds PKCS#5 padding to the input. +func pkcs5Padding(data []byte) []byte { + padding := aes.BlockSize - len(data)%aes.BlockSize + padText := bytes.Repeat([]byte{byte(padding)}, padding) + return append(data, padText...) +} + +// pkcs5UnPadding removes PKCS#5 padding from the input. +func pkcs5UnPadding(data []byte) ([]byte, error) { + length := len(data) + if length == 0 { + return nil, fmt.Errorf("input data is empty") + } + + paddingLen := int(data[length-1]) + if paddingLen == 0 || paddingLen > aes.BlockSize || paddingLen > length { + return nil, fmt.Errorf("invalid padding size") + } + + // Verify that all padding bytes are the same + for i := 0; i < paddingLen; i++ { + if data[length-1-i] != byte(paddingLen) { + return nil, fmt.Errorf("invalid padding") + } + } + + return data[:length-paddingLen], nil +} diff --git a/util/crypt/legacy_test.go b/util/crypt/legacy_test.go new file mode 100644 index 000000000..09b75a71f --- /dev/null +++ b/util/crypt/legacy_test.go @@ -0,0 +1,164 @@ +package crypt + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestLegacyEncryptDecrypt(t *testing.T) { + testData := "exampl@netbird.io" + key, err := GenerateKey() + require.NoError(t, err) + + ec, err := NewFieldEncrypt(key) + require.NoError(t, err) + + encrypted := ec.LegacyEncrypt(testData) + assert.NotEmpty(t, encrypted) + + decrypted, err := ec.LegacyDecrypt(encrypted) + require.NoError(t, err) + + assert.Equal(t, testData, decrypted) +} + +func TestLegacyEncryptDecryptVariousInputs(t *testing.T) { + key, err := GenerateKey() + require.NoError(t, err) + + ec, err := NewFieldEncrypt(key) + require.NoError(t, err) + + testCases := []struct { + name string + input string + }{ + {name: "Empty String", input: ""}, + {name: "Short String", input: "Hello"}, + {name: "String with Spaces", input: "Hello, World!"}, + {name: "Long String", input: "The quick brown fox jumps over the lazy dog."}, + {name: "Unicode Characters", input: "こんにちは世界"}, + {name: "Special Characters", input: "!@#$%^&*()_+-=[]{}|;':\",./<>?"}, + {name: "Numeric String", input: "1234567890"}, + {name: "Repeated Characters", input: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, + {name: "Multi-block String", input: "This is a longer string that will span multiple blocks in the encryption algorithm."}, + {name: "Non-ASCII and ASCII Mix", input: "Hello 世界 123"}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + encrypted := ec.LegacyEncrypt(tc.input) + assert.NotEmpty(t, encrypted) + + decrypted, err := ec.LegacyDecrypt(encrypted) + require.NoError(t, err) + + assert.Equal(t, tc.input, decrypted) + }) + } +} + +func TestPKCS5UnPadding(t *testing.T) { + tests := []struct { + name string + input []byte + expected []byte + expectError bool + }{ + { + name: "Valid Padding", + input: append([]byte("Hello, World!"), bytes.Repeat([]byte{4}, 4)...), + expected: []byte("Hello, World!"), + }, + { + name: "Empty Input", + input: []byte{}, + expectError: true, + }, + { + name: "Padding Length Zero", + input: append([]byte("Hello, World!"), bytes.Repeat([]byte{0}, 4)...), + expectError: true, + }, + { + name: "Padding Length Exceeds Block Size", + input: append([]byte("Hello, World!"), bytes.Repeat([]byte{17}, 17)...), + expectError: true, + }, + { + name: "Padding Length Exceeds Input Length", + input: []byte{5, 5, 5}, + expectError: true, + }, + { + name: "Invalid Padding Bytes", + input: append([]byte("Hello, World!"), []byte{2, 3, 4, 5}...), + expectError: true, + }, + { + name: "Valid Single Byte Padding", + input: append([]byte("Hello, World!"), byte(1)), + expected: []byte("Hello, World!"), + }, + { + name: "Invalid Mixed Padding Bytes", + input: append([]byte("Hello, World!"), []byte{3, 3, 2}...), + expectError: true, + }, + { + name: "Valid Full Block Padding", + input: append([]byte("Hello, World!"), bytes.Repeat([]byte{16}, 16)...), + expected: []byte("Hello, World!"), + }, + { + name: "Non-Padding Byte at End", + input: append([]byte("Hello, World!"), []byte{4, 4, 4, 5}...), + expectError: true, + }, + { + name: "Valid Padding with Different Text Length", + input: append([]byte("Test"), bytes.Repeat([]byte{12}, 12)...), + expected: []byte("Test"), + }, + { + name: "Padding Length Equal to Input Length", + input: bytes.Repeat([]byte{8}, 8), + expected: []byte{}, + }, + { + name: "Invalid Padding Length Zero (Again)", + input: append([]byte("Test"), byte(0)), + expectError: true, + }, + { + name: "Padding Length Greater Than Input", + input: []byte{10}, + expectError: true, + }, + { + name: "Input Length Not Multiple of Block Size", + input: append([]byte("Invalid Length"), byte(1)), + expected: []byte("Invalid Length"), + }, + { + name: "Valid Padding with Non-ASCII Characters", + input: append([]byte("こんにちは"), bytes.Repeat([]byte{2}, 2)...), + expected: []byte("こんにちは"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := pkcs5UnPadding(tt.input) + if tt.expectError { + assert.Error(t, err) + } else { + require.NoError(t, err) + assert.Equal(t, tt.expected, result) + } + }) + } +}