diff --git a/.github/workflows/check-license-dependencies.yml b/.github/workflows/check-license-dependencies.yml index d1d2a8e50..a721cb516 100644 --- a/.github/workflows/check-license-dependencies.yml +++ b/.github/workflows/check-license-dependencies.yml @@ -31,7 +31,7 @@ jobs: while IFS= read -r dir; do echo "=== Checking $dir ===" # Search for problematic imports, excluding test files - RESULTS=$(grep -r "github.com/netbirdio/netbird/\(management\|signal\|relay\|proxy\)" "$dir" --include="*.go" 2>/dev/null | grep -v "_test.go" | grep -v "test_" | grep -v "/test/" || true) + RESULTS=$(grep -r "github.com/netbirdio/netbird/\(management\|signal\|relay\|proxy\)" "$dir" --include="*.go" 2>/dev/null | grep -v "_test.go" | grep -v "test_" | grep -v "/test/" | grep -v "tools/idp-migrate/" || true) if [ -n "$RESULTS" ]; then echo "❌ Found problematic dependencies:" echo "$RESULTS" @@ -88,7 +88,7 @@ jobs: IMPORTERS=$(go list -json -deps ./... 2>/dev/null | jq -r "select(.Imports[]? == \"$package\") | .ImportPath") # Check if any importer is NOT in management/signal/relay - BSD_IMPORTER=$(echo "$IMPORTERS" | grep -v "github.com/netbirdio/netbird/\(management\|signal\|relay\|proxy\|combined\)" | head -1) + BSD_IMPORTER=$(echo "$IMPORTERS" | grep -v "github.com/netbirdio/netbird/\(management\|signal\|relay\|proxy\|combined\|tools/idp-migrate\)" | head -1) if [ -n "$BSD_IMPORTER" ]; then echo "❌ $package ($license) is imported by BSD-licensed code: $BSD_IMPORTER" diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 9e753ce73..62dfe9bce 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -19,7 +19,7 @@ jobs: - name: codespell uses: codespell-project/actions-codespell@v2 with: - ignore_words_list: erro,clienta,hastable,iif,groupd,testin,groupe,cros,ans,deriver,te + ignore_words_list: erro,clienta,hastable,iif,groupd,testin,groupe,cros,ans,deriver,te,userA skip: go.mod,go.sum,**/proxy/web/** 
golangci: strategy: diff --git a/.github/workflows/wasm-build-validation.yml b/.github/workflows/wasm-build-validation.yml index 47e45165b..81ae36e78 100644 --- a/.github/workflows/wasm-build-validation.yml +++ b/.github/workflows/wasm-build-validation.yml @@ -61,8 +61,8 @@ jobs: echo "Size: ${SIZE} bytes (${SIZE_MB} MB)" - if [ ${SIZE} -gt 57671680 ]; then - echo "Wasm binary size (${SIZE_MB}MB) exceeds 55MB limit!" + if [ ${SIZE} -gt 58720256 ]; then + echo "Wasm binary size (${SIZE_MB}MB) exceeds 56MB limit!" exit 1 fi diff --git a/.goreleaser.yaml b/.goreleaser.yaml index 65e63dfa8..5ea479148 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -154,6 +154,26 @@ builds: - -s -w -X main.Version={{.Version}} -X main.Commit={{.Commit}} -X main.BuildDate={{.CommitDate}} mod_timestamp: "{{ .CommitTimestamp }}" + - id: netbird-idp-migrate + dir: tools/idp-migrate + env: + - CGO_ENABLED=1 + - >- + {{- if eq .Runtime.Goos "linux" }} + {{- if eq .Arch "arm64"}}CC=aarch64-linux-gnu-gcc{{- end }} + {{- if eq .Arch "arm"}}CC=arm-linux-gnueabihf-gcc{{- end }} + {{- end }} + binary: netbird-idp-migrate + goos: + - linux + goarch: + - amd64 + - arm64 + - arm + ldflags: + - -s -w -X github.com/netbirdio/netbird/version.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}} -X main.builtBy=goreleaser + mod_timestamp: "{{ .CommitTimestamp }}" + universal_binaries: - id: netbird @@ -166,6 +186,10 @@ archives: - netbird-wasm name_template: "{{ .ProjectName }}_{{ .Version }}" format: binary + - id: netbird-idp-migrate + builds: + - netbird-idp-migrate + name_template: "netbird-idp-migrate_{{ .Version }}_{{ .Os }}_{{ .Arch }}" nfpms: - maintainer: Netbird diff --git a/CONTRIBUTOR_LICENSE_AGREEMENT.md b/CONTRIBUTOR_LICENSE_AGREEMENT.md index 1fdd072c9..b0a6ee218 100644 --- a/CONTRIBUTOR_LICENSE_AGREEMENT.md +++ b/CONTRIBUTOR_LICENSE_AGREEMENT.md @@ -1,7 +1,7 @@ ## Contributor License Agreement This Contributor License Agreement (referred to as the 
"Agreement") is entered into by the individual -submitting this Agreement and NetBird GmbH, c/o Max-Beer-Straße 2-4 Münzstraße 12 10178 Berlin, Germany, +submitting this Agreement and NetBird GmbH, Brunnenstraße 196, 10119 Berlin, Germany, referred to as "NetBird" (collectively, the "Parties"). The Agreement outlines the terms and conditions under which NetBird may utilize software contributions provided by the Contributor for inclusion in its software development projects. By submitting this Agreement, the Contributor confirms their acceptance diff --git a/client/android/client.go b/client/android/client.go index 3fc571559..d35bf4279 100644 --- a/client/android/client.go +++ b/client/android/client.go @@ -205,7 +205,7 @@ func (c *Client) PeersList() *PeerInfoArray { pi := PeerInfo{ p.IP, p.FQDN, - p.ConnStatus.String(), + int(p.ConnStatus), PeerRoutes{routes: maps.Keys(p.GetRoutes())}, } peerInfos[n] = pi diff --git a/client/android/peer_notifier.go b/client/android/peer_notifier.go index b03947da1..4ec22f3ab 100644 --- a/client/android/peer_notifier.go +++ b/client/android/peer_notifier.go @@ -2,11 +2,20 @@ package android +import "github.com/netbirdio/netbird/client/internal/peer" + +// Connection status constants exported via gomobile. +const ( + ConnStatusIdle = int(peer.StatusIdle) + ConnStatusConnecting = int(peer.StatusConnecting) + ConnStatusConnected = int(peer.StatusConnected) +) + // PeerInfo describe information about the peers. 
It designed for the UI usage type PeerInfo struct { IP string FQDN string - ConnStatus string // Todo replace to enum + ConnStatus int Routes PeerRoutes } diff --git a/client/cmd/debug.go b/client/cmd/debug.go index 0e2717756..e3d3afe5f 100644 --- a/client/cmd/debug.go +++ b/client/cmd/debug.go @@ -199,9 +199,11 @@ func runForDuration(cmd *cobra.Command, args []string) error { cmd.Println("Log level set to trace.") } + needsRestoreUp := false if _, err := client.Down(cmd.Context(), &proto.DownRequest{}); err != nil { cmd.PrintErrf("Failed to bring service down: %v\n", status.Convert(err).Message()) } else { + needsRestoreUp = !stateWasDown cmd.Println("netbird down") } @@ -217,6 +219,7 @@ func runForDuration(cmd *cobra.Command, args []string) error { if _, err := client.Up(cmd.Context(), &proto.UpRequest{}); err != nil { cmd.PrintErrf("Failed to bring service up: %v\n", status.Convert(err).Message()) } else { + needsRestoreUp = false cmd.Println("netbird up") } @@ -264,6 +267,14 @@ func runForDuration(cmd *cobra.Command, args []string) error { return fmt.Errorf("failed to bundle debug: %v", status.Convert(err).Message()) } + if needsRestoreUp { + if _, err := client.Up(cmd.Context(), &proto.UpRequest{}); err != nil { + cmd.PrintErrf("Failed to restore service up state: %v\n", status.Convert(err).Message()) + } else { + cmd.Println("netbird up (restored)") + } + } + if stateWasDown { if _, err := client.Down(cmd.Context(), &proto.DownRequest{}); err != nil { cmd.PrintErrf("Failed to restore service down state: %v\n", status.Convert(err).Message()) diff --git a/client/cmd/expose.go b/client/cmd/expose.go index 1334617d8..c48a6adac 100644 --- a/client/cmd/expose.go +++ b/client/cmd/expose.go @@ -14,7 +14,9 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "google.golang.org/grpc/status" + "github.com/netbirdio/netbird/client/internal/expose" "github.com/netbirdio/netbird/client/proto" "github.com/netbirdio/netbird/util" ) @@ -200,7 +202,7 @@ func 
exposeFn(cmd *cobra.Command, args []string) error { stream, err := client.ExposeService(ctx, req) if err != nil { - return fmt.Errorf("expose service: %w", err) + return fmt.Errorf("expose service: %v", status.Convert(err).Message()) } if err := handleExposeReady(cmd, stream, port); err != nil { @@ -211,26 +213,31 @@ func exposeFn(cmd *cobra.Command, args []string) error { } func toExposeProtocol(exposeProtocol string) (proto.ExposeProtocol, error) { - switch strings.ToLower(exposeProtocol) { - case "http": + p, err := expose.ParseProtocolType(exposeProtocol) + if err != nil { + return 0, fmt.Errorf("invalid protocol: %w", err) + } + + switch p { + case expose.ProtocolHTTP: return proto.ExposeProtocol_EXPOSE_HTTP, nil - case "https": + case expose.ProtocolHTTPS: return proto.ExposeProtocol_EXPOSE_HTTPS, nil - case "tcp": + case expose.ProtocolTCP: return proto.ExposeProtocol_EXPOSE_TCP, nil - case "udp": + case expose.ProtocolUDP: return proto.ExposeProtocol_EXPOSE_UDP, nil - case "tls": + case expose.ProtocolTLS: return proto.ExposeProtocol_EXPOSE_TLS, nil default: - return 0, fmt.Errorf("unsupported protocol %q: must be http, https, tcp, udp, or tls", exposeProtocol) + return 0, fmt.Errorf("unhandled protocol type: %d", p) } } func handleExposeReady(cmd *cobra.Command, stream proto.DaemonService_ExposeServiceClient, port uint64) error { event, err := stream.Recv() if err != nil { - return fmt.Errorf("receive expose event: %w", err) + return fmt.Errorf("receive expose event: %v", status.Convert(err).Message()) } ready, ok := event.Event.(*proto.ExposeServiceEvent_Ready) diff --git a/client/cmd/service.go b/client/cmd/service.go index e55465875..5ff16eaeb 100644 --- a/client/cmd/service.go +++ b/client/cmd/service.go @@ -41,7 +41,7 @@ func init() { defaultServiceName = "Netbird" } - serviceCmd.AddCommand(runCmd, startCmd, stopCmd, restartCmd, svcStatusCmd, installCmd, uninstallCmd, reconfigureCmd) + serviceCmd.AddCommand(runCmd, startCmd, stopCmd, restartCmd, 
svcStatusCmd, installCmd, uninstallCmd, reconfigureCmd, resetParamsCmd) serviceCmd.PersistentFlags().BoolVar(&profilesDisabled, "disable-profiles", false, "Disables profiles feature. If enabled, the client will not be able to change or edit any profile. To persist this setting, use: netbird service install --disable-profiles") serviceCmd.PersistentFlags().BoolVar(&updateSettingsDisabled, "disable-update-settings", false, "Disables update settings feature. If enabled, the client will not be able to change or edit any settings. To persist this setting, use: netbird service install --disable-update-settings") diff --git a/client/cmd/service_installer.go b/client/cmd/service_installer.go index f6828d96a..28770ea16 100644 --- a/client/cmd/service_installer.go +++ b/client/cmd/service_installer.go @@ -119,6 +119,10 @@ var installCmd = &cobra.Command{ return err } + if err := loadAndApplyServiceParams(cmd); err != nil { + cmd.PrintErrf("Warning: failed to load saved service params: %v\n", err) + } + svcConfig, err := createServiceConfigForInstall() if err != nil { return err @@ -136,6 +140,10 @@ var installCmd = &cobra.Command{ return fmt.Errorf("install service: %w", err) } + if err := saveServiceParams(currentServiceParams()); err != nil { + cmd.PrintErrf("Warning: failed to save service params: %v\n", err) + } + cmd.Println("NetBird service has been installed") return nil }, @@ -187,6 +195,10 @@ This command will temporarily stop the service, update its configuration, and re return err } + if err := loadAndApplyServiceParams(cmd); err != nil { + cmd.PrintErrf("Warning: failed to load saved service params: %v\n", err) + } + wasRunning, err := isServiceRunning() if err != nil && !errors.Is(err, ErrGetServiceStatus) { return fmt.Errorf("check service status: %w", err) @@ -222,6 +234,10 @@ This command will temporarily stop the service, update its configuration, and re return fmt.Errorf("install service with new config: %w", err) } + if err := 
saveServiceParams(currentServiceParams()); err != nil { + cmd.PrintErrf("Warning: failed to save service params: %v\n", err) + } + if wasRunning { cmd.Println("Starting NetBird service...") if err := s.Start(); err != nil { diff --git a/client/cmd/service_params.go b/client/cmd/service_params.go new file mode 100644 index 000000000..81bd2dbb5 --- /dev/null +++ b/client/cmd/service_params.go @@ -0,0 +1,201 @@ +//go:build !ios && !android + +package cmd + +import ( + "context" + "encoding/json" + "fmt" + "maps" + "os" + "path/filepath" + + "github.com/spf13/cobra" + + "github.com/netbirdio/netbird/client/configs" + "github.com/netbirdio/netbird/util" +) + +const serviceParamsFile = "service.json" + +// serviceParams holds install-time service parameters that persist across +// uninstall/reinstall cycles. Saved to /service.json. +type serviceParams struct { + LogLevel string `json:"log_level"` + DaemonAddr string `json:"daemon_addr"` + ManagementURL string `json:"management_url,omitempty"` + ConfigPath string `json:"config_path,omitempty"` + LogFiles []string `json:"log_files,omitempty"` + DisableProfiles bool `json:"disable_profiles,omitempty"` + DisableUpdateSettings bool `json:"disable_update_settings,omitempty"` + ServiceEnvVars map[string]string `json:"service_env_vars,omitempty"` +} + +// serviceParamsPath returns the path to the service params file. +func serviceParamsPath() string { + return filepath.Join(configs.StateDir, serviceParamsFile) +} + +// loadServiceParams reads saved service parameters from disk. +// Returns nil with no error if the file does not exist. 
+func loadServiceParams() (*serviceParams, error) { + path := serviceParamsPath() + + data, err := os.ReadFile(path) + if err != nil { + if os.IsNotExist(err) { + return nil, nil //nolint:nilnil + } + return nil, fmt.Errorf("read service params %s: %w", path, err) + } + + var params serviceParams + if err := json.Unmarshal(data, &params); err != nil { + return nil, fmt.Errorf("parse service params %s: %w", path, err) + } + + return &params, nil +} + +// saveServiceParams writes current service parameters to disk atomically +// with restricted permissions. +func saveServiceParams(params *serviceParams) error { + path := serviceParamsPath() + if err := util.WriteJsonWithRestrictedPermission(context.Background(), path, params); err != nil { + return fmt.Errorf("save service params: %w", err) + } + return nil +} + +// currentServiceParams captures the current state of all package-level +// variables into a serviceParams struct. +func currentServiceParams() *serviceParams { + params := &serviceParams{ + LogLevel: logLevel, + DaemonAddr: daemonAddr, + ManagementURL: managementURL, + ConfigPath: configPath, + LogFiles: logFiles, + DisableProfiles: profilesDisabled, + DisableUpdateSettings: updateSettingsDisabled, + } + + if len(serviceEnvVars) > 0 { + parsed, err := parseServiceEnvVars(serviceEnvVars) + if err == nil && len(parsed) > 0 { + params.ServiceEnvVars = parsed + } + } + + return params +} + +// loadAndApplyServiceParams loads saved params from disk and applies them +// to any flags that were not explicitly set. +func loadAndApplyServiceParams(cmd *cobra.Command) error { + params, err := loadServiceParams() + if err != nil { + return err + } + applyServiceParams(cmd, params) + return nil +} + +// applyServiceParams merges saved parameters into package-level variables +// for any flag that was not explicitly set by the user (via CLI or env var). +// Flags that were Changed() are left untouched. 
+func applyServiceParams(cmd *cobra.Command, params *serviceParams) { + if params == nil { + return + } + + // For fields with non-empty defaults (log-level, daemon-addr), keep the + // != "" guard so that an older service.json missing the field doesn't + // clobber the default with an empty string. + if !rootCmd.PersistentFlags().Changed("log-level") && params.LogLevel != "" { + logLevel = params.LogLevel + } + + if !rootCmd.PersistentFlags().Changed("daemon-addr") && params.DaemonAddr != "" { + daemonAddr = params.DaemonAddr + } + + // For optional fields where empty means "use default", always apply so + // that an explicit clear (--management-url "") persists across reinstalls. + if !rootCmd.PersistentFlags().Changed("management-url") { + managementURL = params.ManagementURL + } + + if !rootCmd.PersistentFlags().Changed("config") { + configPath = params.ConfigPath + } + + if !rootCmd.PersistentFlags().Changed("log-file") { + logFiles = params.LogFiles + } + + if !serviceCmd.PersistentFlags().Changed("disable-profiles") { + profilesDisabled = params.DisableProfiles + } + + if !serviceCmd.PersistentFlags().Changed("disable-update-settings") { + updateSettingsDisabled = params.DisableUpdateSettings + } + + applyServiceEnvParams(cmd, params) +} + +// applyServiceEnvParams merges saved service environment variables. +// If --service-env was explicitly set, explicit values win on key conflict +// but saved keys not in the explicit set are carried over. +// If --service-env was not set, saved env vars are used entirely. +func applyServiceEnvParams(cmd *cobra.Command, params *serviceParams) { + if len(params.ServiceEnvVars) == 0 { + return + } + + if !cmd.Flags().Changed("service-env") { + // No explicit env vars: rebuild serviceEnvVars from saved params. + serviceEnvVars = envMapToSlice(params.ServiceEnvVars) + return + } + + // Explicit env vars were provided: merge saved values underneath. 
+ explicit, err := parseServiceEnvVars(serviceEnvVars) + if err != nil { + cmd.PrintErrf("Warning: parse explicit service env vars for merge: %v\n", err) + return + } + + merged := make(map[string]string, len(params.ServiceEnvVars)+len(explicit)) + maps.Copy(merged, params.ServiceEnvVars) + maps.Copy(merged, explicit) // explicit wins on conflict + serviceEnvVars = envMapToSlice(merged) +} + +var resetParamsCmd = &cobra.Command{ + Use: "reset-params", + Short: "Remove saved service install parameters", + Long: "Removes the saved service.json file so the next install uses default parameters.", + RunE: func(cmd *cobra.Command, args []string) error { + path := serviceParamsPath() + if err := os.Remove(path); err != nil { + if os.IsNotExist(err) { + cmd.Println("No saved service parameters found") + return nil + } + return fmt.Errorf("remove service params: %w", err) + } + cmd.Printf("Removed saved service parameters (%s)\n", path) + return nil + }, +} + +// envMapToSlice converts a map of env vars to a KEY=VALUE slice. 
+func envMapToSlice(m map[string]string) []string { + s := make([]string, 0, len(m)) + for k, v := range m { + s = append(s, k+"="+v) + } + return s +} diff --git a/client/cmd/service_params_test.go b/client/cmd/service_params_test.go new file mode 100644 index 000000000..3bc8e4f60 --- /dev/null +++ b/client/cmd/service_params_test.go @@ -0,0 +1,523 @@ +//go:build !ios && !android + +package cmd + +import ( + "encoding/json" + "go/ast" + "go/parser" + "go/token" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/client/configs" +) + +func TestServiceParamsPath(t *testing.T) { + original := configs.StateDir + t.Cleanup(func() { configs.StateDir = original }) + + configs.StateDir = "/var/lib/netbird" + assert.Equal(t, filepath.Join("/var/lib/netbird", "service.json"), serviceParamsPath()) + + configs.StateDir = "/custom/state" + assert.Equal(t, filepath.Join("/custom/state", "service.json"), serviceParamsPath()) +} + +func TestSaveAndLoadServiceParams(t *testing.T) { + tmpDir := t.TempDir() + + original := configs.StateDir + t.Cleanup(func() { configs.StateDir = original }) + configs.StateDir = tmpDir + + params := &serviceParams{ + LogLevel: "debug", + DaemonAddr: "unix:///var/run/netbird.sock", + ManagementURL: "https://my.server.com", + ConfigPath: "/etc/netbird/config.json", + LogFiles: []string{"/var/log/netbird/client.log", "console"}, + DisableProfiles: true, + DisableUpdateSettings: false, + ServiceEnvVars: map[string]string{"NB_LOG_FORMAT": "json", "CUSTOM": "val"}, + } + + err := saveServiceParams(params) + require.NoError(t, err) + + // Verify the file exists and is valid JSON. 
+ data, err := os.ReadFile(filepath.Join(tmpDir, "service.json")) + require.NoError(t, err) + assert.True(t, json.Valid(data)) + + loaded, err := loadServiceParams() + require.NoError(t, err) + require.NotNil(t, loaded) + + assert.Equal(t, params.LogLevel, loaded.LogLevel) + assert.Equal(t, params.DaemonAddr, loaded.DaemonAddr) + assert.Equal(t, params.ManagementURL, loaded.ManagementURL) + assert.Equal(t, params.ConfigPath, loaded.ConfigPath) + assert.Equal(t, params.LogFiles, loaded.LogFiles) + assert.Equal(t, params.DisableProfiles, loaded.DisableProfiles) + assert.Equal(t, params.DisableUpdateSettings, loaded.DisableUpdateSettings) + assert.Equal(t, params.ServiceEnvVars, loaded.ServiceEnvVars) +} + +func TestLoadServiceParams_FileNotExists(t *testing.T) { + tmpDir := t.TempDir() + + original := configs.StateDir + t.Cleanup(func() { configs.StateDir = original }) + configs.StateDir = tmpDir + + params, err := loadServiceParams() + assert.NoError(t, err) + assert.Nil(t, params) +} + +func TestLoadServiceParams_InvalidJSON(t *testing.T) { + tmpDir := t.TempDir() + + original := configs.StateDir + t.Cleanup(func() { configs.StateDir = original }) + configs.StateDir = tmpDir + + err := os.WriteFile(filepath.Join(tmpDir, "service.json"), []byte("not json"), 0600) + require.NoError(t, err) + + params, err := loadServiceParams() + assert.Error(t, err) + assert.Nil(t, params) +} + +func TestCurrentServiceParams(t *testing.T) { + origLogLevel := logLevel + origDaemonAddr := daemonAddr + origManagementURL := managementURL + origConfigPath := configPath + origLogFiles := logFiles + origProfilesDisabled := profilesDisabled + origUpdateSettingsDisabled := updateSettingsDisabled + origServiceEnvVars := serviceEnvVars + t.Cleanup(func() { + logLevel = origLogLevel + daemonAddr = origDaemonAddr + managementURL = origManagementURL + configPath = origConfigPath + logFiles = origLogFiles + profilesDisabled = origProfilesDisabled + updateSettingsDisabled = 
origUpdateSettingsDisabled + serviceEnvVars = origServiceEnvVars + }) + + logLevel = "trace" + daemonAddr = "tcp://127.0.0.1:9999" + managementURL = "https://mgmt.example.com" + configPath = "/tmp/test-config.json" + logFiles = []string{"/tmp/test.log"} + profilesDisabled = true + updateSettingsDisabled = true + serviceEnvVars = []string{"FOO=bar", "BAZ=qux"} + + params := currentServiceParams() + + assert.Equal(t, "trace", params.LogLevel) + assert.Equal(t, "tcp://127.0.0.1:9999", params.DaemonAddr) + assert.Equal(t, "https://mgmt.example.com", params.ManagementURL) + assert.Equal(t, "/tmp/test-config.json", params.ConfigPath) + assert.Equal(t, []string{"/tmp/test.log"}, params.LogFiles) + assert.True(t, params.DisableProfiles) + assert.True(t, params.DisableUpdateSettings) + assert.Equal(t, map[string]string{"FOO": "bar", "BAZ": "qux"}, params.ServiceEnvVars) +} + +func TestApplyServiceParams_OnlyUnchangedFlags(t *testing.T) { + origLogLevel := logLevel + origDaemonAddr := daemonAddr + origManagementURL := managementURL + origConfigPath := configPath + origLogFiles := logFiles + origProfilesDisabled := profilesDisabled + origUpdateSettingsDisabled := updateSettingsDisabled + origServiceEnvVars := serviceEnvVars + t.Cleanup(func() { + logLevel = origLogLevel + daemonAddr = origDaemonAddr + managementURL = origManagementURL + configPath = origConfigPath + logFiles = origLogFiles + profilesDisabled = origProfilesDisabled + updateSettingsDisabled = origUpdateSettingsDisabled + serviceEnvVars = origServiceEnvVars + }) + + // Reset all flags to defaults. + logLevel = "info" + daemonAddr = "unix:///var/run/netbird.sock" + managementURL = "" + configPath = "/etc/netbird/config.json" + logFiles = []string{"/var/log/netbird/client.log"} + profilesDisabled = false + updateSettingsDisabled = false + serviceEnvVars = nil + + // Reset Changed state on all relevant flags. 
+ rootCmd.PersistentFlags().VisitAll(func(f *pflag.Flag) { + f.Changed = false + }) + serviceCmd.PersistentFlags().VisitAll(func(f *pflag.Flag) { + f.Changed = false + }) + + // Simulate user explicitly setting --log-level via CLI. + logLevel = "warn" + require.NoError(t, rootCmd.PersistentFlags().Set("log-level", "warn")) + + saved := &serviceParams{ + LogLevel: "debug", + DaemonAddr: "tcp://127.0.0.1:5555", + ManagementURL: "https://saved.example.com", + ConfigPath: "/saved/config.json", + LogFiles: []string{"/saved/client.log"}, + DisableProfiles: true, + DisableUpdateSettings: true, + ServiceEnvVars: map[string]string{"SAVED_KEY": "saved_val"}, + } + + cmd := &cobra.Command{} + cmd.Flags().StringSlice("service-env", nil, "") + applyServiceParams(cmd, saved) + + // log-level was Changed, so it should keep "warn", not use saved "debug". + assert.Equal(t, "warn", logLevel) + + // All other fields were not Changed, so they should use saved values. + assert.Equal(t, "tcp://127.0.0.1:5555", daemonAddr) + assert.Equal(t, "https://saved.example.com", managementURL) + assert.Equal(t, "/saved/config.json", configPath) + assert.Equal(t, []string{"/saved/client.log"}, logFiles) + assert.True(t, profilesDisabled) + assert.True(t, updateSettingsDisabled) + assert.Equal(t, []string{"SAVED_KEY=saved_val"}, serviceEnvVars) +} + +func TestApplyServiceParams_BooleanRevertToFalse(t *testing.T) { + origProfilesDisabled := profilesDisabled + origUpdateSettingsDisabled := updateSettingsDisabled + t.Cleanup(func() { + profilesDisabled = origProfilesDisabled + updateSettingsDisabled = origUpdateSettingsDisabled + }) + + // Simulate current state where booleans are true (e.g. set by previous install). + profilesDisabled = true + updateSettingsDisabled = true + + // Reset Changed state so flags appear unset. + serviceCmd.PersistentFlags().VisitAll(func(f *pflag.Flag) { + f.Changed = false + }) + + // Saved params have both as false. 
+ saved := &serviceParams{ + DisableProfiles: false, + DisableUpdateSettings: false, + } + + cmd := &cobra.Command{} + cmd.Flags().StringSlice("service-env", nil, "") + applyServiceParams(cmd, saved) + + assert.False(t, profilesDisabled, "saved false should override current true") + assert.False(t, updateSettingsDisabled, "saved false should override current true") +} + +func TestApplyServiceParams_ClearManagementURL(t *testing.T) { + origManagementURL := managementURL + t.Cleanup(func() { managementURL = origManagementURL }) + + managementURL = "https://leftover.example.com" + + // Simulate saved params where management URL was explicitly cleared. + saved := &serviceParams{ + LogLevel: "info", + DaemonAddr: "unix:///var/run/netbird.sock", + // ManagementURL intentionally empty: was cleared with --management-url "". + } + + rootCmd.PersistentFlags().VisitAll(func(f *pflag.Flag) { + f.Changed = false + }) + + cmd := &cobra.Command{} + cmd.Flags().StringSlice("service-env", nil, "") + applyServiceParams(cmd, saved) + + assert.Equal(t, "", managementURL, "saved empty management URL should clear the current value") +} + +func TestApplyServiceParams_NilParams(t *testing.T) { + origLogLevel := logLevel + t.Cleanup(func() { logLevel = origLogLevel }) + + logLevel = "info" + cmd := &cobra.Command{} + cmd.Flags().StringSlice("service-env", nil, "") + + // Should be a no-op. + applyServiceParams(cmd, nil) + assert.Equal(t, "info", logLevel) +} + +func TestApplyServiceEnvParams_MergeExplicitAndSaved(t *testing.T) { + origServiceEnvVars := serviceEnvVars + t.Cleanup(func() { serviceEnvVars = origServiceEnvVars }) + + // Set up a command with --service-env marked as Changed. 
+ cmd := &cobra.Command{} + cmd.Flags().StringSlice("service-env", nil, "") + require.NoError(t, cmd.Flags().Set("service-env", "EXPLICIT=yes,OVERLAP=explicit")) + + serviceEnvVars = []string{"EXPLICIT=yes", "OVERLAP=explicit"} + + saved := &serviceParams{ + ServiceEnvVars: map[string]string{ + "SAVED": "val", + "OVERLAP": "saved", + }, + } + + applyServiceEnvParams(cmd, saved) + + // Parse result for easier assertion. + result, err := parseServiceEnvVars(serviceEnvVars) + require.NoError(t, err) + + assert.Equal(t, "yes", result["EXPLICIT"]) + assert.Equal(t, "val", result["SAVED"]) + // Explicit wins on conflict. + assert.Equal(t, "explicit", result["OVERLAP"]) +} + +func TestApplyServiceEnvParams_NotChanged(t *testing.T) { + origServiceEnvVars := serviceEnvVars + t.Cleanup(func() { serviceEnvVars = origServiceEnvVars }) + + serviceEnvVars = nil + + cmd := &cobra.Command{} + cmd.Flags().StringSlice("service-env", nil, "") + + saved := &serviceParams{ + ServiceEnvVars: map[string]string{"FROM_SAVED": "val"}, + } + + applyServiceEnvParams(cmd, saved) + + result, err := parseServiceEnvVars(serviceEnvVars) + require.NoError(t, err) + assert.Equal(t, map[string]string{"FROM_SAVED": "val"}, result) +} + +// TestServiceParams_FieldsCoveredInFunctions ensures that all serviceParams fields are +// referenced in both currentServiceParams() and applyServiceParams(). If a new field is +// added to serviceParams but not wired into these functions, this test fails. +func TestServiceParams_FieldsCoveredInFunctions(t *testing.T) { + fset := token.NewFileSet() + file, err := parser.ParseFile(fset, "service_params.go", nil, 0) + require.NoError(t, err) + + // Collect all JSON field names from the serviceParams struct. + structFields := extractStructJSONFields(t, file, "serviceParams") + require.NotEmpty(t, structFields, "failed to find serviceParams struct fields") + + // Collect field names referenced in currentServiceParams and applyServiceParams. 
+ currentFields := extractFuncFieldRefs(t, file, "currentServiceParams", structFields) + applyFields := extractFuncFieldRefs(t, file, "applyServiceParams", structFields) + // applyServiceEnvParams handles ServiceEnvVars indirectly. + applyEnvFields := extractFuncFieldRefs(t, file, "applyServiceEnvParams", structFields) + for k, v := range applyEnvFields { + applyFields[k] = v + } + + for _, field := range structFields { + assert.Contains(t, currentFields, field, + "serviceParams field %q is not captured in currentServiceParams()", field) + assert.Contains(t, applyFields, field, + "serviceParams field %q is not restored in applyServiceParams()/applyServiceEnvParams()", field) + } +} + +// TestServiceParams_BuildArgsCoversAllFlags ensures that buildServiceArguments references +// all serviceParams fields that should become CLI args. ServiceEnvVars is excluded because +// it flows through newSVCConfig() EnvVars, not CLI args. +func TestServiceParams_BuildArgsCoversAllFlags(t *testing.T) { + fset := token.NewFileSet() + file, err := parser.ParseFile(fset, "service_params.go", nil, 0) + require.NoError(t, err) + + structFields := extractStructJSONFields(t, file, "serviceParams") + require.NotEmpty(t, structFields) + + installerFile, err := parser.ParseFile(fset, "service_installer.go", nil, 0) + require.NoError(t, err) + + // Fields that are handled outside of buildServiceArguments (env vars go through newSVCConfig). + fieldsNotInArgs := map[string]bool{ + "ServiceEnvVars": true, + } + + buildFields := extractFuncGlobalRefs(t, installerFile, "buildServiceArguments") + + // Forward: every struct field must appear in buildServiceArguments. 
+ for _, field := range structFields { + if fieldsNotInArgs[field] { + continue + } + globalVar := fieldToGlobalVar(field) + assert.Contains(t, buildFields, globalVar, + "serviceParams field %q (global %q) is not referenced in buildServiceArguments()", field, globalVar) + } + + // Reverse: every service-related global used in buildServiceArguments must + // have a corresponding serviceParams field. This catches a developer adding + // a new flag to buildServiceArguments without adding it to the struct. + globalToField := make(map[string]string, len(structFields)) + for _, field := range structFields { + globalToField[fieldToGlobalVar(field)] = field + } + // Identifiers in buildServiceArguments that are not service params + // (builtins, boilerplate, loop variables). + nonParamGlobals := map[string]bool{ + "args": true, "append": true, "string": true, "_": true, + "logFile": true, // range variable over logFiles + } + for ref := range buildFields { + if nonParamGlobals[ref] { + continue + } + _, inStruct := globalToField[ref] + assert.True(t, inStruct, + "buildServiceArguments() references global %q which has no corresponding serviceParams field", ref) + } +} + +// extractStructJSONFields returns field names from a named struct type. +func extractStructJSONFields(t *testing.T, file *ast.File, structName string) []string { + t.Helper() + var fields []string + ast.Inspect(file, func(n ast.Node) bool { + ts, ok := n.(*ast.TypeSpec) + if !ok || ts.Name.Name != structName { + return true + } + st, ok := ts.Type.(*ast.StructType) + if !ok { + return false + } + for _, f := range st.Fields.List { + if len(f.Names) > 0 { + fields = append(fields, f.Names[0].Name) + } + } + return false + }) + return fields +} + +// extractFuncFieldRefs returns which of the given field names appear inside the +// named function, either as selector expressions (params.FieldName) or as +// composite literal keys (&serviceParams{FieldName: ...}). 
+func extractFuncFieldRefs(t *testing.T, file *ast.File, funcName string, fields []string) map[string]bool { + t.Helper() + fieldSet := make(map[string]bool, len(fields)) + for _, f := range fields { + fieldSet[f] = true + } + + found := make(map[string]bool) + fn := findFuncDecl(file, funcName) + require.NotNil(t, fn, "function %s not found", funcName) + + ast.Inspect(fn.Body, func(n ast.Node) bool { + switch v := n.(type) { + case *ast.SelectorExpr: + if fieldSet[v.Sel.Name] { + found[v.Sel.Name] = true + } + case *ast.KeyValueExpr: + if ident, ok := v.Key.(*ast.Ident); ok && fieldSet[ident.Name] { + found[ident.Name] = true + } + } + return true + }) + return found +} + +// extractFuncGlobalRefs returns all identifier names referenced in the named function body. +func extractFuncGlobalRefs(t *testing.T, file *ast.File, funcName string) map[string]bool { + t.Helper() + fn := findFuncDecl(file, funcName) + require.NotNil(t, fn, "function %s not found", funcName) + + refs := make(map[string]bool) + ast.Inspect(fn.Body, func(n ast.Node) bool { + if ident, ok := n.(*ast.Ident); ok { + refs[ident.Name] = true + } + return true + }) + return refs +} + +func findFuncDecl(file *ast.File, name string) *ast.FuncDecl { + for _, decl := range file.Decls { + fn, ok := decl.(*ast.FuncDecl) + if ok && fn.Name.Name == name { + return fn + } + } + return nil +} + +// fieldToGlobalVar maps serviceParams field names to the package-level variable +// names used in buildServiceArguments and applyServiceParams. +func fieldToGlobalVar(field string) string { + m := map[string]string{ + "LogLevel": "logLevel", + "DaemonAddr": "daemonAddr", + "ManagementURL": "managementURL", + "ConfigPath": "configPath", + "LogFiles": "logFiles", + "DisableProfiles": "profilesDisabled", + "DisableUpdateSettings": "updateSettingsDisabled", + "ServiceEnvVars": "serviceEnvVars", + } + if v, ok := m[field]; ok { + return v + } + // Default: lowercase first letter. 
+ return strings.ToLower(field[:1]) + field[1:] +} + +func TestEnvMapToSlice(t *testing.T) { + m := map[string]string{"A": "1", "B": "2"} + s := envMapToSlice(m) + assert.Len(t, s, 2) + assert.Contains(t, s, "A=1") + assert.Contains(t, s, "B=2") +} + +func TestEnvMapToSlice_Empty(t *testing.T) { + s := envMapToSlice(map[string]string{}) + assert.Empty(t, s) +} diff --git a/client/cmd/service_test.go b/client/cmd/service_test.go index 6d75ca524..ce6f71550 100644 --- a/client/cmd/service_test.go +++ b/client/cmd/service_test.go @@ -4,7 +4,9 @@ import ( "context" "fmt" "os" + "os/signal" "runtime" + "syscall" "testing" "time" @@ -13,6 +15,22 @@ import ( "github.com/stretchr/testify/require" ) +// TestMain intercepts when this test binary is run as a daemon subprocess. +// On FreeBSD, the rc.d service script runs the binary via daemon(8) -r with +// "service run ..." arguments. Since the test binary can't handle cobra CLI +// args, it exits immediately, causing daemon -r to respawn rapidly until +// hitting the rate limit and exiting. This makes service restart unreliable. +// Blocking here keeps the subprocess alive until the init system sends SIGTERM. +func TestMain(m *testing.M) { + if len(os.Args) > 2 && os.Args[1] == "service" && os.Args[2] == "run" { + sig := make(chan os.Signal, 1) + signal.Notify(sig, syscall.SIGTERM, os.Interrupt) + <-sig + return + } + os.Exit(m.Run()) +} + const ( serviceStartTimeout = 10 * time.Second serviceStopTimeout = 5 * time.Second @@ -79,6 +97,34 @@ func TestServiceLifecycle(t *testing.T) { logLevel = "info" daemonAddr = fmt.Sprintf("unix://%s/netbird-test.sock", tempDir) + // Ensure cleanup even if a subtest fails and Stop/Uninstall subtests don't run. 
+ t.Cleanup(func() { + cfg, err := newSVCConfig() + if err != nil { + t.Errorf("cleanup: create service config: %v", err) + return + } + ctxSvc, cancel := context.WithCancel(context.Background()) + defer cancel() + s, err := newSVC(newProgram(ctxSvc, cancel), cfg) + if err != nil { + t.Errorf("cleanup: create service: %v", err) + return + } + + // If the subtests already cleaned up, there's nothing to do. + if _, err := s.Status(); err != nil { + return + } + + if err := s.Stop(); err != nil { + t.Errorf("cleanup: stop service: %v", err) + } + if err := s.Uninstall(); err != nil { + t.Errorf("cleanup: uninstall service: %v", err) + } + }) + ctx := context.Background() t.Run("Install", func(t *testing.T) { diff --git a/client/embed/embed.go b/client/embed/embed.go index 9fa797f18..88f7e541c 100644 --- a/client/embed/embed.go +++ b/client/embed/embed.go @@ -33,14 +33,14 @@ var ( ErrConfigNotInitialized = errors.New("config not initialized") ) -// PeerConnStatus is a peer's connection status. -type PeerConnStatus = peer.ConnStatus - const ( // PeerStatusConnected indicates the peer is in connected state. PeerStatusConnected = peer.StatusConnected ) +// PeerConnStatus is a peer's connection status. +type PeerConnStatus = peer.ConnStatus + // Client manages a netbird embedded client instance. type Client struct { deviceName string @@ -375,6 +375,32 @@ func (c *Client) NewHTTPClient() *http.Client { } } +// Expose exposes a local service via the NetBird reverse proxy, making it accessible through a public URL. +// It returns an ExposeSession. Call Wait on the session to keep it alive. 
+func (c *Client) Expose(ctx context.Context, req ExposeRequest) (*ExposeSession, error) { + engine, err := c.getEngine() + if err != nil { + return nil, err + } + + mgr := engine.GetExposeManager() + if mgr == nil { + return nil, fmt.Errorf("expose manager not available") + } + + resp, err := mgr.Expose(ctx, req) + if err != nil { + return nil, fmt.Errorf("expose: %w", err) + } + + return &ExposeSession{ + Domain: resp.Domain, + ServiceName: resp.ServiceName, + ServiceURL: resp.ServiceURL, + mgr: mgr, + }, nil +} + // Status returns the current status of the client. func (c *Client) Status() (peer.FullStatus, error) { c.mu.Lock() diff --git a/client/embed/expose.go b/client/embed/expose.go new file mode 100644 index 000000000..825bb90ee --- /dev/null +++ b/client/embed/expose.go @@ -0,0 +1,45 @@ +package embed + +import ( + "context" + "errors" + + "github.com/netbirdio/netbird/client/internal/expose" +) + +const ( + // ExposeProtocolHTTP exposes the service as HTTP. + ExposeProtocolHTTP = expose.ProtocolHTTP + // ExposeProtocolHTTPS exposes the service as HTTPS. + ExposeProtocolHTTPS = expose.ProtocolHTTPS + // ExposeProtocolTCP exposes the service as TCP. + ExposeProtocolTCP = expose.ProtocolTCP + // ExposeProtocolUDP exposes the service as UDP. + ExposeProtocolUDP = expose.ProtocolUDP + // ExposeProtocolTLS exposes the service as TLS. + ExposeProtocolTLS = expose.ProtocolTLS +) + +// ExposeRequest is a request to expose a local service via the NetBird reverse proxy. +type ExposeRequest = expose.Request + +// ExposeProtocolType represents the protocol used for exposing a service. +type ExposeProtocolType = expose.ProtocolType + +// ExposeSession represents an active expose session. Use Wait to block until the session ends. +type ExposeSession struct { + Domain string + ServiceName string + ServiceURL string + + mgr *expose.Manager +} + +// Wait blocks while keeping the expose session alive. 
+// It returns when ctx is cancelled or a keep-alive error occurs, then terminates the session.
+func (s *ExposeSession) Wait(ctx context.Context) error {
+	if s == nil || s.mgr == nil {
+		return errors.New("expose session is not initialized")
+	}
+	return s.mgr.KeepAlive(ctx, s.Domain)
+}
diff --git a/client/firewall/create_linux.go b/client/firewall/create_linux.go
index 12dcaee8a..d781ebd77 100644
--- a/client/firewall/create_linux.go
+++ b/client/firewall/create_linux.go
@@ -6,6 +6,7 @@ import (
 	"errors"
 	"fmt"
 	"os"
+	"strconv"
 
 	"github.com/coreos/go-iptables/iptables"
 	"github.com/google/nftables"
@@ -35,20 +36,27 @@ const SKIP_NFTABLES_ENV = "NB_SKIP_NFTABLES_CHECK"
 type FWType int
 
 func NewFirewall(iface IFaceMapper, stateManager *statemanager.Manager, flowLogger nftypes.FlowLogger, disableServerRoutes bool, mtu uint16) (firewall.Manager, error) {
-	// on the linux system we try to user nftables or iptables
-	// in any case, because we need to allow netbird interface traffic
-	// so we use AllowNetbird traffic from these firewall managers
-	// for the userspace packet filtering firewall
+	// If we run in userspace mode and forcing the userspace firewall was requested, don't attempt the native firewall.
+	if iface.IsUserspaceBind() && forceUserspaceFirewall() {
+		log.Info("forcing userspace firewall")
+		return createUserspaceFirewall(iface, nil, disableServerRoutes, flowLogger, mtu)
+	}
+
+	// Use the native firewall for either kernel or userspace mode; the interface appears identical to netfilter.
 	fm, err := createNativeFirewall(iface, stateManager, disableServerRoutes, mtu)
 
+	// Kernel cannot fall back to anything else, need to return error
 	if !iface.IsUserspaceBind() {
 		return fm, err
 	}
 
+	// Fall back to the userspace packet filter if native is unavailable
 	if err != nil {
 		log.Warnf("failed to create native firewall: %v. 
Proceeding with userspace", err) + return createUserspaceFirewall(iface, nil, disableServerRoutes, flowLogger, mtu) } - return createUserspaceFirewall(iface, fm, disableServerRoutes, flowLogger, mtu) + + return fm, nil } func createNativeFirewall(iface IFaceMapper, stateManager *statemanager.Manager, routes bool, mtu uint16) (firewall.Manager, error) { @@ -160,3 +168,17 @@ func isIptablesClientAvailable(client *iptables.IPTables) bool { _, err := client.ListChains("filter") return err == nil } + +func forceUserspaceFirewall() bool { + val := os.Getenv(EnvForceUserspaceFirewall) + if val == "" { + return false + } + + force, err := strconv.ParseBool(val) + if err != nil { + log.Warnf("failed to parse %s: %v", EnvForceUserspaceFirewall, err) + return false + } + return force +} diff --git a/client/firewall/iface.go b/client/firewall/iface.go index b83c5f912..491f03269 100644 --- a/client/firewall/iface.go +++ b/client/firewall/iface.go @@ -7,6 +7,12 @@ import ( "github.com/netbirdio/netbird/client/iface/wgaddr" ) +// EnvForceUserspaceFirewall forces the use of the userspace packet filter even when +// native iptables/nftables is available. This only applies when the WireGuard interface +// runs in userspace mode. When set, peer ACLs are handled by USPFilter instead of +// kernel netfilter rules. 
+const EnvForceUserspaceFirewall = "NB_FORCE_USERSPACE_FIREWALL" + // IFaceMapper defines subset methods of interface required for manager type IFaceMapper interface { Name() string diff --git a/client/firewall/iptables/manager_linux.go b/client/firewall/iptables/manager_linux.go index 04c338375..a1d4467d5 100644 --- a/client/firewall/iptables/manager_linux.go +++ b/client/firewall/iptables/manager_linux.go @@ -33,7 +33,6 @@ type Manager struct { type iFaceMapper interface { Name() string Address() wgaddr.Address - IsUserspaceBind() bool } // Create iptables firewall manager @@ -64,10 +63,9 @@ func Create(wgIface iFaceMapper, mtu uint16) (*Manager, error) { func (m *Manager) Init(stateManager *statemanager.Manager) error { state := &ShutdownState{ InterfaceState: &InterfaceState{ - NameStr: m.wgIface.Name(), - WGAddress: m.wgIface.Address(), - UserspaceBind: m.wgIface.IsUserspaceBind(), - MTU: m.router.mtu, + NameStr: m.wgIface.Name(), + WGAddress: m.wgIface.Address(), + MTU: m.router.mtu, }, } stateManager.RegisterState(state) @@ -203,12 +201,10 @@ func (m *Manager) Close(stateManager *statemanager.Manager) error { return nberrors.FormatErrorOrNil(merr) } -// AllowNetbird allows netbird interface traffic +// AllowNetbird allows netbird interface traffic. +// This is called when USPFilter wraps the native firewall, adding blanket accept +// rules so that packet filtering is handled in userspace instead of by netfilter. func (m *Manager) AllowNetbird() error { - if !m.wgIface.IsUserspaceBind() { - return nil - } - _, err := m.AddPeerFiltering( nil, net.IP{0, 0, 0, 0}, @@ -286,6 +282,22 @@ func (m *Manager) RemoveInboundDNAT(localAddr netip.Addr, protocol firewall.Prot return m.router.RemoveInboundDNAT(localAddr, protocol, sourcePort, targetPort) } +// AddOutputDNAT adds an OUTPUT chain DNAT rule for locally-generated traffic. 
+func (m *Manager) AddOutputDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error { + m.mutex.Lock() + defer m.mutex.Unlock() + + return m.router.AddOutputDNAT(localAddr, protocol, sourcePort, targetPort) +} + +// RemoveOutputDNAT removes an OUTPUT chain DNAT rule. +func (m *Manager) RemoveOutputDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error { + m.mutex.Lock() + defer m.mutex.Unlock() + + return m.router.RemoveOutputDNAT(localAddr, protocol, sourcePort, targetPort) +} + const ( chainNameRaw = "NETBIRD-RAW" chainOUTPUT = "OUTPUT" diff --git a/client/firewall/iptables/manager_linux_test.go b/client/firewall/iptables/manager_linux_test.go index ee47a27c0..cc4bda0e0 100644 --- a/client/firewall/iptables/manager_linux_test.go +++ b/client/firewall/iptables/manager_linux_test.go @@ -47,8 +47,6 @@ func (i *iFaceMock) Address() wgaddr.Address { panic("AddressFunc is not set") } -func (i *iFaceMock) IsUserspaceBind() bool { return false } - func TestIptablesManager(t *testing.T) { ipv4Client, err := iptables.NewWithProtocol(iptables.ProtocolIPv4) require.NoError(t, err) diff --git a/client/firewall/iptables/router_linux.go b/client/firewall/iptables/router_linux.go index 1fe4c149f..a7c4f67dd 100644 --- a/client/firewall/iptables/router_linux.go +++ b/client/firewall/iptables/router_linux.go @@ -36,6 +36,7 @@ const ( chainRTFWDOUT = "NETBIRD-RT-FWD-OUT" chainRTPRE = "NETBIRD-RT-PRE" chainRTRDR = "NETBIRD-RT-RDR" + chainNATOutput = "NETBIRD-NAT-OUTPUT" chainRTMSSCLAMP = "NETBIRD-RT-MSSCLAMP" routingFinalForwardJump = "ACCEPT" routingFinalNatJump = "MASQUERADE" @@ -43,6 +44,7 @@ const ( jumpManglePre = "jump-mangle-pre" jumpNatPre = "jump-nat-pre" jumpNatPost = "jump-nat-post" + jumpNatOutput = "jump-nat-output" jumpMSSClamp = "jump-mss-clamp" markManglePre = "mark-mangle-pre" markManglePost = "mark-mangle-post" @@ -387,6 +389,14 @@ func (r *router) cleanUpDefaultForwardRules() error { } 
log.Debug("flushing routing related tables") + + // Remove jump rules from built-in chains before deleting custom chains, + // otherwise the chain deletion fails with "device or resource busy". + jumpRule := []string{"-j", chainNATOutput} + if err := r.iptablesClient.Delete(tableNat, "OUTPUT", jumpRule...); err != nil { + log.Debugf("clean OUTPUT jump rule: %v", err) + } + for _, chainInfo := range []struct { chain string table string @@ -396,6 +406,7 @@ func (r *router) cleanUpDefaultForwardRules() error { {chainRTPRE, tableMangle}, {chainRTNAT, tableNat}, {chainRTRDR, tableNat}, + {chainNATOutput, tableNat}, {chainRTMSSCLAMP, tableMangle}, } { ok, err := r.iptablesClient.ChainExists(chainInfo.table, chainInfo.chain) @@ -970,6 +981,81 @@ func (r *router) RemoveInboundDNAT(localAddr netip.Addr, protocol firewall.Proto return nil } +// ensureNATOutputChain lazily creates the OUTPUT NAT chain and jump rule on first use. +func (r *router) ensureNATOutputChain() error { + if _, exists := r.rules[jumpNatOutput]; exists { + return nil + } + + chainExists, err := r.iptablesClient.ChainExists(tableNat, chainNATOutput) + if err != nil { + return fmt.Errorf("check chain %s: %w", chainNATOutput, err) + } + if !chainExists { + if err := r.iptablesClient.NewChain(tableNat, chainNATOutput); err != nil { + return fmt.Errorf("create chain %s: %w", chainNATOutput, err) + } + } + + jumpRule := []string{"-j", chainNATOutput} + if err := r.iptablesClient.Insert(tableNat, "OUTPUT", 1, jumpRule...); err != nil { + if !chainExists { + if delErr := r.iptablesClient.ClearAndDeleteChain(tableNat, chainNATOutput); delErr != nil { + log.Warnf("failed to rollback chain %s: %v", chainNATOutput, delErr) + } + } + return fmt.Errorf("add OUTPUT jump rule: %w", err) + } + r.rules[jumpNatOutput] = jumpRule + + r.updateState() + return nil +} + +// AddOutputDNAT adds an OUTPUT chain DNAT rule for locally-generated traffic. 
+func (r *router) AddOutputDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error { + ruleID := fmt.Sprintf("output-dnat-%s-%s-%d-%d", localAddr.String(), protocol, sourcePort, targetPort) + + if _, exists := r.rules[ruleID]; exists { + return nil + } + + if err := r.ensureNATOutputChain(); err != nil { + return err + } + + dnatRule := []string{ + "-p", strings.ToLower(string(protocol)), + "--dport", strconv.Itoa(int(sourcePort)), + "-d", localAddr.String(), + "-j", "DNAT", + "--to-destination", ":" + strconv.Itoa(int(targetPort)), + } + + if err := r.iptablesClient.Append(tableNat, chainNATOutput, dnatRule...); err != nil { + return fmt.Errorf("add output DNAT rule: %w", err) + } + r.rules[ruleID] = dnatRule + + r.updateState() + return nil +} + +// RemoveOutputDNAT removes an OUTPUT chain DNAT rule. +func (r *router) RemoveOutputDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error { + ruleID := fmt.Sprintf("output-dnat-%s-%s-%d-%d", localAddr.String(), protocol, sourcePort, targetPort) + + if dnatRule, exists := r.rules[ruleID]; exists { + if err := r.iptablesClient.Delete(tableNat, chainNATOutput, dnatRule...); err != nil { + return fmt.Errorf("delete output DNAT rule: %w", err) + } + delete(r.rules, ruleID) + } + + r.updateState() + return nil +} + func applyPort(flag string, port *firewall.Port) []string { if port == nil { return nil diff --git a/client/firewall/iptables/state_linux.go b/client/firewall/iptables/state_linux.go index c88774c1f..121c755e9 100644 --- a/client/firewall/iptables/state_linux.go +++ b/client/firewall/iptables/state_linux.go @@ -9,10 +9,9 @@ import ( ) type InterfaceState struct { - NameStr string `json:"name"` - WGAddress wgaddr.Address `json:"wg_address"` - UserspaceBind bool `json:"userspace_bind"` - MTU uint16 `json:"mtu"` + NameStr string `json:"name"` + WGAddress wgaddr.Address `json:"wg_address"` + MTU uint16 `json:"mtu"` } func (i *InterfaceState) 
Name() string { @@ -23,10 +22,6 @@ func (i *InterfaceState) Address() wgaddr.Address { return i.WGAddress } -func (i *InterfaceState) IsUserspaceBind() bool { - return i.UserspaceBind -} - type ShutdownState struct { sync.Mutex diff --git a/client/firewall/manager/firewall.go b/client/firewall/manager/firewall.go index 3511a5463..d65d717b3 100644 --- a/client/firewall/manager/firewall.go +++ b/client/firewall/manager/firewall.go @@ -169,6 +169,14 @@ type Manager interface { // RemoveInboundDNAT removes inbound DNAT rule RemoveInboundDNAT(localAddr netip.Addr, protocol Protocol, sourcePort, targetPort uint16) error + // AddOutputDNAT adds an OUTPUT chain DNAT rule for locally-generated traffic. + // localAddr must be IPv4; the underlying iptables/nftables backends are IPv4-only. + AddOutputDNAT(localAddr netip.Addr, protocol Protocol, sourcePort, targetPort uint16) error + + // RemoveOutputDNAT removes an OUTPUT chain DNAT rule. + // localAddr must be IPv4; the underlying iptables/nftables backends are IPv4-only. + RemoveOutputDNAT(localAddr netip.Addr, protocol Protocol, sourcePort, targetPort uint16) error + // SetupEBPFProxyNoTrack creates static notrack rules for eBPF proxy loopback traffic. // This prevents conntrack from interfering with WireGuard proxy communication. SetupEBPFProxyNoTrack(proxyPort, wgPort uint16) error diff --git a/client/firewall/nftables/manager_linux.go b/client/firewall/nftables/manager_linux.go index f57b28abc..0b5b61e04 100644 --- a/client/firewall/nftables/manager_linux.go +++ b/client/firewall/nftables/manager_linux.go @@ -40,7 +40,6 @@ func getTableName() string { type iFaceMapper interface { Name() string Address() wgaddr.Address - IsUserspaceBind() bool } // Manager of iptables firewall @@ -106,10 +105,9 @@ func (m *Manager) Init(stateManager *statemanager.Manager) error { // cleanup using Close() without needing to store specific rules. 
if err := stateManager.UpdateState(&ShutdownState{ InterfaceState: &InterfaceState{ - NameStr: m.wgIface.Name(), - WGAddress: m.wgIface.Address(), - UserspaceBind: m.wgIface.IsUserspaceBind(), - MTU: m.router.mtu, + NameStr: m.wgIface.Name(), + WGAddress: m.wgIface.Address(), + MTU: m.router.mtu, }, }); err != nil { log.Errorf("failed to update state: %v", err) @@ -205,12 +203,10 @@ func (m *Manager) RemoveNatRule(pair firewall.RouterPair) error { return m.router.RemoveNatRule(pair) } -// AllowNetbird allows netbird interface traffic +// AllowNetbird allows netbird interface traffic. +// This is called when USPFilter wraps the native firewall, adding blanket accept +// rules so that packet filtering is handled in userspace instead of by netfilter. func (m *Manager) AllowNetbird() error { - if !m.wgIface.IsUserspaceBind() { - return nil - } - m.mutex.Lock() defer m.mutex.Unlock() @@ -346,6 +342,22 @@ func (m *Manager) RemoveInboundDNAT(localAddr netip.Addr, protocol firewall.Prot return m.router.RemoveInboundDNAT(localAddr, protocol, sourcePort, targetPort) } +// AddOutputDNAT adds an OUTPUT chain DNAT rule for locally-generated traffic. +func (m *Manager) AddOutputDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error { + m.mutex.Lock() + defer m.mutex.Unlock() + + return m.router.AddOutputDNAT(localAddr, protocol, sourcePort, targetPort) +} + +// RemoveOutputDNAT removes an OUTPUT chain DNAT rule. 
+func (m *Manager) RemoveOutputDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error { + m.mutex.Lock() + defer m.mutex.Unlock() + + return m.router.RemoveOutputDNAT(localAddr, protocol, sourcePort, targetPort) +} + const ( chainNameRawOutput = "netbird-raw-out" chainNameRawPrerouting = "netbird-raw-pre" diff --git a/client/firewall/nftables/manager_linux_test.go b/client/firewall/nftables/manager_linux_test.go index 75b1e2b6c..d48e4ba88 100644 --- a/client/firewall/nftables/manager_linux_test.go +++ b/client/firewall/nftables/manager_linux_test.go @@ -52,8 +52,6 @@ func (i *iFaceMock) Address() wgaddr.Address { panic("AddressFunc is not set") } -func (i *iFaceMock) IsUserspaceBind() bool { return false } - func TestNftablesManager(t *testing.T) { // just check on the local interface diff --git a/client/firewall/nftables/router_linux.go b/client/firewall/nftables/router_linux.go index fde654c20..904daf7cb 100644 --- a/client/firewall/nftables/router_linux.go +++ b/client/firewall/nftables/router_linux.go @@ -36,6 +36,7 @@ const ( chainNameRoutingFw = "netbird-rt-fwd" chainNameRoutingNat = "netbird-rt-postrouting" chainNameRoutingRdr = "netbird-rt-redirect" + chainNameNATOutput = "netbird-nat-output" chainNameForward = "FORWARD" chainNameMangleForward = "netbird-mangle-forward" @@ -1853,6 +1854,130 @@ func (r *router) RemoveInboundDNAT(localAddr netip.Addr, protocol firewall.Proto return nil } +// ensureNATOutputChain lazily creates the OUTPUT NAT chain on first use. 
+func (r *router) ensureNATOutputChain() error { + if _, exists := r.chains[chainNameNATOutput]; exists { + return nil + } + + r.chains[chainNameNATOutput] = r.conn.AddChain(&nftables.Chain{ + Name: chainNameNATOutput, + Table: r.workTable, + Hooknum: nftables.ChainHookOutput, + Priority: nftables.ChainPriorityNATDest, + Type: nftables.ChainTypeNAT, + }) + + if err := r.conn.Flush(); err != nil { + delete(r.chains, chainNameNATOutput) + return fmt.Errorf("create NAT output chain: %w", err) + } + return nil +} + +// AddOutputDNAT adds an OUTPUT chain DNAT rule for locally-generated traffic. +func (r *router) AddOutputDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error { + ruleID := fmt.Sprintf("output-dnat-%s-%s-%d-%d", localAddr.String(), protocol, sourcePort, targetPort) + + if _, exists := r.rules[ruleID]; exists { + return nil + } + + if err := r.ensureNATOutputChain(); err != nil { + return err + } + + protoNum, err := protoToInt(protocol) + if err != nil { + return fmt.Errorf("convert protocol to number: %w", err) + } + + exprs := []expr.Any{ + &expr.Meta{Key: expr.MetaKeyL4PROTO, Register: 1}, + &expr.Cmp{ + Op: expr.CmpOpEq, + Register: 1, + Data: []byte{protoNum}, + }, + &expr.Payload{ + DestRegister: 2, + Base: expr.PayloadBaseTransportHeader, + Offset: 2, + Len: 2, + }, + &expr.Cmp{ + Op: expr.CmpOpEq, + Register: 2, + Data: binaryutil.BigEndian.PutUint16(sourcePort), + }, + } + + exprs = append(exprs, applyPrefix(netip.PrefixFrom(localAddr, 32), false)...) 
+ + exprs = append(exprs, + &expr.Immediate{ + Register: 1, + Data: localAddr.AsSlice(), + }, + &expr.Immediate{ + Register: 2, + Data: binaryutil.BigEndian.PutUint16(targetPort), + }, + &expr.NAT{ + Type: expr.NATTypeDestNAT, + Family: uint32(nftables.TableFamilyIPv4), + RegAddrMin: 1, + RegProtoMin: 2, + }, + ) + + dnatRule := &nftables.Rule{ + Table: r.workTable, + Chain: r.chains[chainNameNATOutput], + Exprs: exprs, + UserData: []byte(ruleID), + } + r.conn.AddRule(dnatRule) + + if err := r.conn.Flush(); err != nil { + return fmt.Errorf("add output DNAT rule: %w", err) + } + + r.rules[ruleID] = dnatRule + + return nil +} + +// RemoveOutputDNAT removes an OUTPUT chain DNAT rule. +func (r *router) RemoveOutputDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error { + if err := r.refreshRulesMap(); err != nil { + return fmt.Errorf(refreshRulesMapError, err) + } + + ruleID := fmt.Sprintf("output-dnat-%s-%s-%d-%d", localAddr.String(), protocol, sourcePort, targetPort) + + rule, exists := r.rules[ruleID] + if !exists { + return nil + } + + if rule.Handle == 0 { + log.Warnf("output DNAT rule %s has no handle, removing stale entry", ruleID) + delete(r.rules, ruleID) + return nil + } + + if err := r.conn.DelRule(rule); err != nil { + return fmt.Errorf("delete output DNAT rule %s: %w", ruleID, err) + } + if err := r.conn.Flush(); err != nil { + return fmt.Errorf("flush delete output DNAT rule: %w", err) + } + delete(r.rules, ruleID) + + return nil +} + // applyNetwork generates nftables expressions for networks (CIDR) or sets func (r *router) applyNetwork( network firewall.Network, diff --git a/client/firewall/nftables/state_linux.go b/client/firewall/nftables/state_linux.go index 48b7b3741..462ad2556 100644 --- a/client/firewall/nftables/state_linux.go +++ b/client/firewall/nftables/state_linux.go @@ -8,10 +8,9 @@ import ( ) type InterfaceState struct { - NameStr string `json:"name"` - WGAddress wgaddr.Address `json:"wg_address"` - 
UserspaceBind bool `json:"userspace_bind"` - MTU uint16 `json:"mtu"` + NameStr string `json:"name"` + WGAddress wgaddr.Address `json:"wg_address"` + MTU uint16 `json:"mtu"` } func (i *InterfaceState) Name() string { @@ -22,10 +21,6 @@ func (i *InterfaceState) Address() wgaddr.Address { return i.WGAddress } -func (i *InterfaceState) IsUserspaceBind() bool { - return i.UserspaceBind -} - type ShutdownState struct { InterfaceState *InterfaceState `json:"interface_state,omitempty"` } diff --git a/client/firewall/uspfilter/filter.go b/client/firewall/uspfilter/filter.go index df2e274eb..cb9e1bb0a 100644 --- a/client/firewall/uspfilter/filter.go +++ b/client/firewall/uspfilter/filter.go @@ -140,6 +140,17 @@ type Manager struct { mtu uint16 mssClampValue uint16 mssClampEnabled bool + + // Only one hook per protocol is supported. Outbound direction only. + udpHookOut atomic.Pointer[packetHook] + tcpHookOut atomic.Pointer[packetHook] +} + +// packetHook stores a registered hook for a specific IP:port. +type packetHook struct { + ip netip.Addr + port uint16 + fn func([]byte) bool } // decoder for packages @@ -594,6 +605,8 @@ func (m *Manager) resetState() { maps.Clear(m.incomingRules) maps.Clear(m.routeRulesMap) m.routeRules = m.routeRules[:0] + m.udpHookOut.Store(nil) + m.tcpHookOut.Store(nil) if m.udpTracker != nil { m.udpTracker.Close() @@ -713,6 +726,9 @@ func (m *Manager) filterOutbound(packetData []byte, size int) bool { return true } case layers.LayerTypeTCP: + if m.tcpHooksDrop(uint16(d.tcp.DstPort), dstIP, packetData) { + return true + } // Clamp MSS on all TCP SYN packets, including those from local IPs. // SNATed routed traffic may appear as local IP but still requires clamping. 
if m.mssClampEnabled { @@ -895,38 +911,21 @@ func (m *Manager) trackInbound(d *decoder, srcIP, dstIP netip.Addr, ruleID []byt d.dnatOrigPort = 0 } -// udpHooksDrop checks if any UDP hooks should drop the packet func (m *Manager) udpHooksDrop(dport uint16, dstIP netip.Addr, packetData []byte) bool { - m.mutex.RLock() - defer m.mutex.RUnlock() + return hookMatches(m.udpHookOut.Load(), dstIP, dport, packetData) +} - // Check specific destination IP first - if rules, exists := m.outgoingRules[dstIP]; exists { - for _, rule := range rules { - if rule.udpHook != nil && portsMatch(rule.dPort, dport) { - return rule.udpHook(packetData) - } - } +func (m *Manager) tcpHooksDrop(dport uint16, dstIP netip.Addr, packetData []byte) bool { + return hookMatches(m.tcpHookOut.Load(), dstIP, dport, packetData) +} + +func hookMatches(h *packetHook, dstIP netip.Addr, dport uint16, packetData []byte) bool { + if h == nil { + return false } - - // Check IPv4 unspecified address - if rules, exists := m.outgoingRules[netip.IPv4Unspecified()]; exists { - for _, rule := range rules { - if rule.udpHook != nil && portsMatch(rule.dPort, dport) { - return rule.udpHook(packetData) - } - } + if h.ip == dstIP && h.port == dport { + return h.fn(packetData) } - - // Check IPv6 unspecified address - if rules, exists := m.outgoingRules[netip.IPv6Unspecified()]; exists { - for _, rule := range rules { - if rule.udpHook != nil && portsMatch(rule.dPort, dport) { - return rule.udpHook(packetData) - } - } - } - return false } @@ -1278,12 +1277,6 @@ func validateRule(ip netip.Addr, packetData []byte, rules map[string]PeerRule, d return rule.mgmtId, rule.drop, true } case layers.LayerTypeUDP: - // if rule has UDP hook (and if we are here we match this rule) - // we ignore rule.drop and call this hook - if rule.udpHook != nil { - return rule.mgmtId, rule.udpHook(packetData), true - } - if portsMatch(rule.sPort, uint16(d.udp.SrcPort)) && portsMatch(rule.dPort, uint16(d.udp.DstPort)) { return rule.mgmtId, 
rule.drop, true } @@ -1342,65 +1335,30 @@ func (m *Manager) ruleMatches(rule *RouteRule, srcAddr, dstAddr netip.Addr, prot return sourceMatched } -// AddUDPPacketHook calls hook when UDP packet from given direction matched -// -// Hook function returns flag which indicates should be the matched package dropped or not -func (m *Manager) AddUDPPacketHook(in bool, ip netip.Addr, dPort uint16, hook func(packet []byte) bool) string { - r := PeerRule{ - id: uuid.New().String(), - ip: ip, - protoLayer: layers.LayerTypeUDP, - dPort: &firewall.Port{Values: []uint16{dPort}}, - ipLayer: layers.LayerTypeIPv6, - udpHook: hook, +// SetUDPPacketHook sets the outbound UDP packet hook. Pass nil hook to remove. +func (m *Manager) SetUDPPacketHook(ip netip.Addr, dPort uint16, hook func(packet []byte) bool) { + if hook == nil { + m.udpHookOut.Store(nil) + return } - - if ip.Is4() { - r.ipLayer = layers.LayerTypeIPv4 - } - - m.mutex.Lock() - if in { - // Incoming UDP hooks are stored in allow rules map - if _, ok := m.incomingRules[r.ip]; !ok { - m.incomingRules[r.ip] = make(map[string]PeerRule) - } - m.incomingRules[r.ip][r.id] = r - } else { - if _, ok := m.outgoingRules[r.ip]; !ok { - m.outgoingRules[r.ip] = make(map[string]PeerRule) - } - m.outgoingRules[r.ip][r.id] = r - } - m.mutex.Unlock() - - return r.id + m.udpHookOut.Store(&packetHook{ + ip: ip, + port: dPort, + fn: hook, + }) } -// RemovePacketHook removes packet hook by given ID -func (m *Manager) RemovePacketHook(hookID string) error { - m.mutex.Lock() - defer m.mutex.Unlock() - - // Check incoming hooks (stored in allow rules) - for _, arr := range m.incomingRules { - for _, r := range arr { - if r.id == hookID { - delete(arr, r.id) - return nil - } - } +// SetTCPPacketHook sets the outbound TCP packet hook. Pass nil hook to remove. 
+func (m *Manager) SetTCPPacketHook(ip netip.Addr, dPort uint16, hook func(packet []byte) bool) { + if hook == nil { + m.tcpHookOut.Store(nil) + return } - // Check outgoing hooks - for _, arr := range m.outgoingRules { - for _, r := range arr { - if r.id == hookID { - delete(arr, r.id) - return nil - } - } - } - return fmt.Errorf("hook with given id not found") + m.tcpHookOut.Store(&packetHook{ + ip: ip, + port: dPort, + fn: hook, + }) } // SetLogLevel sets the log level for the firewall manager diff --git a/client/firewall/uspfilter/filter_test.go b/client/firewall/uspfilter/filter_test.go index 55a8e723c..5f0f9f860 100644 --- a/client/firewall/uspfilter/filter_test.go +++ b/client/firewall/uspfilter/filter_test.go @@ -12,6 +12,7 @@ import ( "github.com/google/gopacket" "github.com/google/gopacket/layers" "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" wgdevice "golang.zx2c4.com/wireguard/device" @@ -186,81 +187,52 @@ func TestManagerDeleteRule(t *testing.T) { } } -func TestAddUDPPacketHook(t *testing.T) { - tests := []struct { - name string - in bool - expDir fw.RuleDirection - ip netip.Addr - dPort uint16 - hook func([]byte) bool - expectedID string - }{ - { - name: "Test Outgoing UDP Packet Hook", - in: false, - expDir: fw.RuleDirectionOUT, - ip: netip.MustParseAddr("10.168.0.1"), - dPort: 8000, - hook: func([]byte) bool { return true }, - }, - { - name: "Test Incoming UDP Packet Hook", - in: true, - expDir: fw.RuleDirectionIN, - ip: netip.MustParseAddr("::1"), - dPort: 9000, - hook: func([]byte) bool { return false }, - }, - } +func TestSetUDPPacketHook(t *testing.T) { + manager, err := Create(&IFaceMock{ + SetFilterFunc: func(device.PacketFilter) error { return nil }, + }, false, flowLogger, nbiface.DefaultMTU) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, manager.Close(nil)) }) - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - manager, err := Create(&IFaceMock{ 
- SetFilterFunc: func(device.PacketFilter) error { return nil }, - }, false, flowLogger, nbiface.DefaultMTU) - require.NoError(t, err) + var called bool + manager.SetUDPPacketHook(netip.MustParseAddr("10.168.0.1"), 8000, func([]byte) bool { + called = true + return true + }) - manager.AddUDPPacketHook(tt.in, tt.ip, tt.dPort, tt.hook) + h := manager.udpHookOut.Load() + require.NotNil(t, h) + assert.Equal(t, netip.MustParseAddr("10.168.0.1"), h.ip) + assert.Equal(t, uint16(8000), h.port) + assert.True(t, h.fn(nil)) + assert.True(t, called) - var addedRule PeerRule - if tt.in { - // Incoming UDP hooks are stored in allow rules map - if len(manager.incomingRules[tt.ip]) != 1 { - t.Errorf("expected 1 incoming rule, got %d", len(manager.incomingRules[tt.ip])) - return - } - for _, rule := range manager.incomingRules[tt.ip] { - addedRule = rule - } - } else { - if len(manager.outgoingRules[tt.ip]) != 1 { - t.Errorf("expected 1 outgoing rule, got %d", len(manager.outgoingRules[tt.ip])) - return - } - for _, rule := range manager.outgoingRules[tt.ip] { - addedRule = rule - } - } + manager.SetUDPPacketHook(netip.MustParseAddr("10.168.0.1"), 8000, nil) + assert.Nil(t, manager.udpHookOut.Load()) +} - if tt.ip.Compare(addedRule.ip) != 0 { - t.Errorf("expected ip %s, got %s", tt.ip, addedRule.ip) - return - } - if tt.dPort != addedRule.dPort.Values[0] { - t.Errorf("expected dPort %d, got %d", tt.dPort, addedRule.dPort.Values[0]) - return - } - if layers.LayerTypeUDP != addedRule.protoLayer { - t.Errorf("expected protoLayer %s, got %s", layers.LayerTypeUDP, addedRule.protoLayer) - return - } - if addedRule.udpHook == nil { - t.Errorf("expected udpHook to be set") - return - } - }) - } +func TestSetTCPPacketHook(t *testing.T) { + manager, err := Create(&IFaceMock{ + SetFilterFunc: func(device.PacketFilter) error { return nil }, + }, false, flowLogger, nbiface.DefaultMTU) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, manager.Close(nil)) }) + + var called bool + 
manager.SetTCPPacketHook(netip.MustParseAddr("10.168.0.1"), 53, func([]byte) bool { + called = true + return true + }) + + h := manager.tcpHookOut.Load() + require.NotNil(t, h) + assert.Equal(t, netip.MustParseAddr("10.168.0.1"), h.ip) + assert.Equal(t, uint16(53), h.port) + assert.True(t, h.fn(nil)) + assert.True(t, called) + + manager.SetTCPPacketHook(netip.MustParseAddr("10.168.0.1"), 53, nil) + assert.Nil(t, manager.tcpHookOut.Load()) } // TestPeerRuleLifecycleDenyRules verifies that deny rules are correctly added @@ -530,39 +502,12 @@ func TestRemovePacketHook(t *testing.T) { require.NoError(t, manager.Close(nil)) }() - // Add a UDP packet hook - hookFunc := func(data []byte) bool { return true } - hookID := manager.AddUDPPacketHook(false, netip.MustParseAddr("192.168.0.1"), 8080, hookFunc) + manager.SetUDPPacketHook(netip.MustParseAddr("192.168.0.1"), 8080, func([]byte) bool { return true }) - // Assert the hook is added by finding it in the manager's outgoing rules - found := false - for _, arr := range manager.outgoingRules { - for _, rule := range arr { - if rule.id == hookID { - found = true - break - } - } - } + require.NotNil(t, manager.udpHookOut.Load(), "hook should be registered") - if !found { - t.Fatalf("The hook was not added properly.") - } - - // Now remove the packet hook - err = manager.RemovePacketHook(hookID) - if err != nil { - t.Fatalf("Failed to remove hook: %s", err) - } - - // Assert the hook is removed by checking it in the manager's outgoing rules - for _, arr := range manager.outgoingRules { - for _, rule := range arr { - if rule.id == hookID { - t.Fatalf("The hook was not removed properly.") - } - } - } + manager.SetUDPPacketHook(netip.MustParseAddr("192.168.0.1"), 8080, nil) + assert.Nil(t, manager.udpHookOut.Load(), "hook should be removed") } func TestProcessOutgoingHooks(t *testing.T) { @@ -592,8 +537,7 @@ func TestProcessOutgoingHooks(t *testing.T) { } hookCalled := false - hookID := manager.AddUDPPacketHook( - false, + 
manager.SetUDPPacketHook( netip.MustParseAddr("100.10.0.100"), 53, func([]byte) bool { @@ -601,7 +545,6 @@ func TestProcessOutgoingHooks(t *testing.T) { return true }, ) - require.NotEmpty(t, hookID) // Create test UDP packet ipv4 := &layers.IPv4{ diff --git a/client/firewall/uspfilter/localip.go b/client/firewall/uspfilter/localip.go index ffc807f46..f63fe3e45 100644 --- a/client/firewall/uspfilter/localip.go +++ b/client/firewall/uspfilter/localip.go @@ -144,6 +144,8 @@ func (m *localIPManager) UpdateLocalIPs(iface common.IFaceMapper) (err error) { if err != nil { log.Warnf("failed to get interfaces: %v", err) } else { + // TODO: filter out down interfaces (net.FlagUp). Also handle the reverse + // case where an interface comes up between refreshes. for _, intf := range interfaces { m.processInterface(intf, &newIPv4Bitmap, ipv4Set, &ipv4Addresses) } diff --git a/client/firewall/uspfilter/nat.go b/client/firewall/uspfilter/nat.go index 597f892cf..8ed32eb5e 100644 --- a/client/firewall/uspfilter/nat.go +++ b/client/firewall/uspfilter/nat.go @@ -421,6 +421,7 @@ func (m *Manager) addPortRedirection(targetIP netip.Addr, protocol gopacket.Laye } // AddInboundDNAT adds an inbound DNAT rule redirecting traffic from NetBird peers to local services. +// TODO: also delegate to nativeFirewall when available for kernel WG mode func (m *Manager) AddInboundDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error { var layerType gopacket.LayerType switch protocol { @@ -466,6 +467,22 @@ func (m *Manager) RemoveInboundDNAT(localAddr netip.Addr, protocol firewall.Prot return m.removePortRedirection(localAddr, layerType, sourcePort, targetPort) } +// AddOutputDNAT delegates to the native firewall if available. 
+func (m *Manager) AddOutputDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error { + if m.nativeFirewall == nil { + return fmt.Errorf("output DNAT not supported without native firewall") + } + return m.nativeFirewall.AddOutputDNAT(localAddr, protocol, sourcePort, targetPort) +} + +// RemoveOutputDNAT delegates to the native firewall if available. +func (m *Manager) RemoveOutputDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error { + if m.nativeFirewall == nil { + return nil + } + return m.nativeFirewall.RemoveOutputDNAT(localAddr, protocol, sourcePort, targetPort) +} + // translateInboundPortDNAT applies port-specific DNAT translation to inbound packets. func (m *Manager) translateInboundPortDNAT(packetData []byte, d *decoder, srcIP, dstIP netip.Addr) bool { if !m.portDNATEnabled.Load() { diff --git a/client/firewall/uspfilter/rule.go b/client/firewall/uspfilter/rule.go index dbe3a7858..08d68a78e 100644 --- a/client/firewall/uspfilter/rule.go +++ b/client/firewall/uspfilter/rule.go @@ -18,9 +18,7 @@ type PeerRule struct { protoLayer gopacket.LayerType sPort *firewall.Port dPort *firewall.Port - drop bool - - udpHook func([]byte) bool + drop bool } // ID returns the rule id diff --git a/client/firewall/uspfilter/tracer_test.go b/client/firewall/uspfilter/tracer_test.go index d9f9f1aa8..657f96fc0 100644 --- a/client/firewall/uspfilter/tracer_test.go +++ b/client/firewall/uspfilter/tracer_test.go @@ -399,21 +399,17 @@ func TestTracePacket(t *testing.T) { { name: "UDPTraffic_WithHook", setup: func(m *Manager) { - hookFunc := func([]byte) bool { - return true - } - m.AddUDPPacketHook(true, netip.MustParseAddr("1.1.1.1"), 53, hookFunc) + m.SetUDPPacketHook(netip.MustParseAddr("100.10.255.254"), 53, func([]byte) bool { + return true // drop (intercepted by hook) + }) }, packetBuilder: func() *PacketBuilder { - return createPacketBuilder("1.1.1.1", "100.10.0.100", "udp", 12345, 53, 
fw.RuleDirectionIN) + return createPacketBuilder("100.10.0.100", "100.10.255.254", "udp", 12345, 53, fw.RuleDirectionOUT) }, expectedStages: []PacketStage{ StageReceived, - StageInboundPortDNAT, - StageInbound1to1NAT, - StageConntrack, - StageRouting, - StagePeerACL, + StageOutbound1to1NAT, + StageOutboundPortReverse, StageCompleted, }, expectedAllow: false, diff --git a/client/iface/device/device_filter.go b/client/iface/device/device_filter.go index 708f38d26..4357d1916 100644 --- a/client/iface/device/device_filter.go +++ b/client/iface/device/device_filter.go @@ -15,14 +15,17 @@ type PacketFilter interface { // FilterInbound filter incoming packets from external sources to host FilterInbound(packetData []byte, size int) bool - // AddUDPPacketHook calls hook when UDP packet from given direction matched - // - // Hook function returns flag which indicates should be the matched package dropped or not. - // Hook function receives raw network packet data as argument. - AddUDPPacketHook(in bool, ip netip.Addr, dPort uint16, hook func(packet []byte) bool) string + // SetUDPPacketHook registers a hook for outbound UDP packets matching the given IP and port. + // Hook function returns true if the packet should be dropped. + // Only one UDP hook is supported; calling again replaces the previous hook. + // Pass nil hook to remove. + SetUDPPacketHook(ip netip.Addr, dPort uint16, hook func(packet []byte) bool) - // RemovePacketHook removes hook by ID - RemovePacketHook(hookID string) error + // SetTCPPacketHook registers a hook for outbound TCP packets matching the given IP and port. + // Hook function returns true if the packet should be dropped. + // Only one TCP hook is supported; calling again replaces the previous hook. + // Pass nil hook to remove. 
+ SetTCPPacketHook(ip netip.Addr, dPort uint16, hook func(packet []byte) bool) } // FilteredDevice to override Read or Write of packets diff --git a/client/iface/mocks/filter.go b/client/iface/mocks/filter.go index 566068aa5..5ae98039c 100644 --- a/client/iface/mocks/filter.go +++ b/client/iface/mocks/filter.go @@ -34,18 +34,28 @@ func (m *MockPacketFilter) EXPECT() *MockPacketFilterMockRecorder { return m.recorder } -// AddUDPPacketHook mocks base method. -func (m *MockPacketFilter) AddUDPPacketHook(arg0 bool, arg1 netip.Addr, arg2 uint16, arg3 func([]byte) bool) string { +// SetUDPPacketHook mocks base method. +func (m *MockPacketFilter) SetUDPPacketHook(arg0 netip.Addr, arg1 uint16, arg2 func([]byte) bool) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddUDPPacketHook", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(string) - return ret0 + m.ctrl.Call(m, "SetUDPPacketHook", arg0, arg1, arg2) } -// AddUDPPacketHook indicates an expected call of AddUDPPacketHook. -func (mr *MockPacketFilterMockRecorder) AddUDPPacketHook(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +// SetUDPPacketHook indicates an expected call of SetUDPPacketHook. +func (mr *MockPacketFilterMockRecorder) SetUDPPacketHook(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUDPPacketHook", reflect.TypeOf((*MockPacketFilter)(nil).AddUDPPacketHook), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetUDPPacketHook", reflect.TypeOf((*MockPacketFilter)(nil).SetUDPPacketHook), arg0, arg1, arg2) +} + +// SetTCPPacketHook mocks base method. +func (m *MockPacketFilter) SetTCPPacketHook(arg0 netip.Addr, arg1 uint16, arg2 func([]byte) bool) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetTCPPacketHook", arg0, arg1, arg2) +} + +// SetTCPPacketHook indicates an expected call of SetTCPPacketHook. 
+func (mr *MockPacketFilterMockRecorder) SetTCPPacketHook(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTCPPacketHook", reflect.TypeOf((*MockPacketFilter)(nil).SetTCPPacketHook), arg0, arg1, arg2) } // FilterInbound mocks base method. @@ -75,17 +85,3 @@ func (mr *MockPacketFilterMockRecorder) FilterOutbound(arg0 interface{}, arg1 an mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FilterOutbound", reflect.TypeOf((*MockPacketFilter)(nil).FilterOutbound), arg0, arg1) } - -// RemovePacketHook mocks base method. -func (m *MockPacketFilter) RemovePacketHook(arg0 string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RemovePacketHook", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// RemovePacketHook indicates an expected call of RemovePacketHook. -func (mr *MockPacketFilterMockRecorder) RemovePacketHook(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemovePacketHook", reflect.TypeOf((*MockPacketFilter)(nil).RemovePacketHook), arg0) -} diff --git a/client/iface/mocks/iface/mocks/filter.go b/client/iface/mocks/iface/mocks/filter.go deleted file mode 100644 index 291ab9ab5..000000000 --- a/client/iface/mocks/iface/mocks/filter.go +++ /dev/null @@ -1,87 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/netbirdio/netbird/client/iface (interfaces: PacketFilter) - -// Package mocks is a generated GoMock package. -package mocks - -import ( - net "net" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" -) - -// MockPacketFilter is a mock of PacketFilter interface. -type MockPacketFilter struct { - ctrl *gomock.Controller - recorder *MockPacketFilterMockRecorder -} - -// MockPacketFilterMockRecorder is the mock recorder for MockPacketFilter. 
-type MockPacketFilterMockRecorder struct { - mock *MockPacketFilter -} - -// NewMockPacketFilter creates a new mock instance. -func NewMockPacketFilter(ctrl *gomock.Controller) *MockPacketFilter { - mock := &MockPacketFilter{ctrl: ctrl} - mock.recorder = &MockPacketFilterMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockPacketFilter) EXPECT() *MockPacketFilterMockRecorder { - return m.recorder -} - -// AddUDPPacketHook mocks base method. -func (m *MockPacketFilter) AddUDPPacketHook(arg0 bool, arg1 net.IP, arg2 uint16, arg3 func(*net.UDPAddr, []byte) bool) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddUDPPacketHook", arg0, arg1, arg2, arg3) -} - -// AddUDPPacketHook indicates an expected call of AddUDPPacketHook. -func (mr *MockPacketFilterMockRecorder) AddUDPPacketHook(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUDPPacketHook", reflect.TypeOf((*MockPacketFilter)(nil).AddUDPPacketHook), arg0, arg1, arg2, arg3) -} - -// FilterInbound mocks base method. -func (m *MockPacketFilter) FilterInbound(arg0 []byte) bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FilterInbound", arg0) - ret0, _ := ret[0].(bool) - return ret0 -} - -// FilterInbound indicates an expected call of FilterInbound. -func (mr *MockPacketFilterMockRecorder) FilterInbound(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FilterInbound", reflect.TypeOf((*MockPacketFilter)(nil).FilterInbound), arg0) -} - -// FilterOutbound mocks base method. -func (m *MockPacketFilter) FilterOutbound(arg0 []byte) bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FilterOutbound", arg0) - ret0, _ := ret[0].(bool) - return ret0 -} - -// FilterOutbound indicates an expected call of FilterOutbound. 
-func (mr *MockPacketFilterMockRecorder) FilterOutbound(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FilterOutbound", reflect.TypeOf((*MockPacketFilter)(nil).FilterOutbound), arg0) -} - -// SetNetwork mocks base method. -func (m *MockPacketFilter) SetNetwork(arg0 *net.IPNet) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetNetwork", arg0) -} - -// SetNetwork indicates an expected call of SetNetwork. -func (mr *MockPacketFilterMockRecorder) SetNetwork(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetNetwork", reflect.TypeOf((*MockPacketFilter)(nil).SetNetwork), arg0) -} diff --git a/client/internal/acl/manager_test.go b/client/internal/acl/manager_test.go index bd7adfaef..408ed992f 100644 --- a/client/internal/acl/manager_test.go +++ b/client/internal/acl/manager_test.go @@ -19,6 +19,9 @@ import ( var flowLogger = netflow.NewManager(nil, []byte{}, nil).GetLogger() func TestDefaultManager(t *testing.T) { + t.Setenv("NB_WG_KERNEL_DISABLED", "true") + t.Setenv(firewall.EnvForceUserspaceFirewall, "true") + networkMap := &mgmProto.NetworkMap{ FirewallRules: []*mgmProto.FirewallRule{ { @@ -135,6 +138,7 @@ func TestDefaultManager(t *testing.T) { func TestDefaultManagerStateless(t *testing.T) { // stateless currently only in userspace, so we have to disable kernel t.Setenv("NB_WG_KERNEL_DISABLED", "true") + t.Setenv(firewall.EnvForceUserspaceFirewall, "true") t.Setenv("NB_DISABLE_CONNTRACK", "true") networkMap := &mgmProto.NetworkMap{ @@ -194,6 +198,7 @@ func TestDefaultManagerStateless(t *testing.T) { // This tests the full ACL manager -> uspfilter integration. 
func TestDenyRulesNotAccumulatedOnRepeatedApply(t *testing.T) { t.Setenv("NB_WG_KERNEL_DISABLED", "true") + t.Setenv(firewall.EnvForceUserspaceFirewall, "true") networkMap := &mgmProto.NetworkMap{ FirewallRules: []*mgmProto.FirewallRule{ @@ -258,6 +263,7 @@ func TestDenyRulesNotAccumulatedOnRepeatedApply(t *testing.T) { // up when they're removed from the network map in a subsequent update. func TestDenyRulesCleanedUpOnRemoval(t *testing.T) { t.Setenv("NB_WG_KERNEL_DISABLED", "true") + t.Setenv(firewall.EnvForceUserspaceFirewall, "true") ctrl := gomock.NewController(t) defer ctrl.Finish() @@ -339,6 +345,7 @@ func TestDenyRulesCleanedUpOnRemoval(t *testing.T) { // one added without leaking. func TestRuleUpdateChangingAction(t *testing.T) { t.Setenv("NB_WG_KERNEL_DISABLED", "true") + t.Setenv(firewall.EnvForceUserspaceFirewall, "true") ctrl := gomock.NewController(t) defer ctrl.Finish() diff --git a/client/internal/auth/auth.go b/client/internal/auth/auth.go index 44e98bede..bdfd07430 100644 --- a/client/internal/auth/auth.go +++ b/client/internal/auth/auth.go @@ -155,7 +155,7 @@ func (a *Auth) IsLoginRequired(ctx context.Context) (bool, error) { var needsLogin bool err = a.withRetry(ctx, func(client *mgm.GrpcClient) error { - _, _, err := a.doMgmLogin(client, ctx, pubSSHKey) + err := a.doMgmLogin(client, ctx, pubSSHKey) if isLoginNeeded(err) { needsLogin = true return nil @@ -179,8 +179,8 @@ func (a *Auth) Login(ctx context.Context, setupKey string, jwtToken string) (err var isAuthError bool err = a.withRetry(ctx, func(client *mgm.GrpcClient) error { - serverKey, _, err := a.doMgmLogin(client, ctx, pubSSHKey) - if serverKey != nil && isRegistrationNeeded(err) { + err := a.doMgmLogin(client, ctx, pubSSHKey) + if isRegistrationNeeded(err) { log.Debugf("peer registration required") _, err = a.registerPeer(client, ctx, setupKey, jwtToken, pubSSHKey) if err != nil { @@ -201,13 +201,7 @@ func (a *Auth) Login(ctx context.Context, setupKey string, jwtToken string) (err // 
getPKCEFlow retrieves PKCE authorization flow configuration and creates a flow instance func (a *Auth) getPKCEFlow(client *mgm.GrpcClient) (*PKCEAuthorizationFlow, error) { - serverKey, err := client.GetServerPublicKey() - if err != nil { - log.Errorf("failed while getting Management Service public key: %v", err) - return nil, err - } - - protoFlow, err := client.GetPKCEAuthorizationFlow(*serverKey) + protoFlow, err := client.GetPKCEAuthorizationFlow() if err != nil { if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { log.Warnf("server couldn't find pkce flow, contact admin: %v", err) @@ -221,7 +215,7 @@ func (a *Auth) getPKCEFlow(client *mgm.GrpcClient) (*PKCEAuthorizationFlow, erro config := &PKCEAuthProviderConfig{ Audience: protoConfig.GetAudience(), ClientID: protoConfig.GetClientID(), - ClientSecret: protoConfig.GetClientSecret(), + ClientSecret: protoConfig.GetClientSecret(), //nolint:staticcheck TokenEndpoint: protoConfig.GetTokenEndpoint(), AuthorizationEndpoint: protoConfig.GetAuthorizationEndpoint(), Scope: protoConfig.GetScope(), @@ -246,13 +240,7 @@ func (a *Auth) getPKCEFlow(client *mgm.GrpcClient) (*PKCEAuthorizationFlow, erro // getDeviceFlow retrieves device authorization flow configuration and creates a flow instance func (a *Auth) getDeviceFlow(client *mgm.GrpcClient) (*DeviceAuthorizationFlow, error) { - serverKey, err := client.GetServerPublicKey() - if err != nil { - log.Errorf("failed while getting Management Service public key: %v", err) - return nil, err - } - - protoFlow, err := client.GetDeviceAuthorizationFlow(*serverKey) + protoFlow, err := client.GetDeviceAuthorizationFlow() if err != nil { if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { log.Warnf("server couldn't find device flow, contact admin: %v", err) @@ -266,7 +254,7 @@ func (a *Auth) getDeviceFlow(client *mgm.GrpcClient) (*DeviceAuthorizationFlow, config := &DeviceAuthProviderConfig{ Audience: protoConfig.GetAudience(), ClientID: 
protoConfig.GetClientID(), - ClientSecret: protoConfig.GetClientSecret(), + ClientSecret: protoConfig.GetClientSecret(), //nolint:staticcheck Domain: protoConfig.Domain, TokenEndpoint: protoConfig.GetTokenEndpoint(), DeviceAuthEndpoint: protoConfig.GetDeviceAuthEndpoint(), @@ -292,28 +280,16 @@ func (a *Auth) getDeviceFlow(client *mgm.GrpcClient) (*DeviceAuthorizationFlow, } // doMgmLogin performs the actual login operation with the management service -func (a *Auth) doMgmLogin(client *mgm.GrpcClient, ctx context.Context, pubSSHKey []byte) (*wgtypes.Key, *mgmProto.LoginResponse, error) { - serverKey, err := client.GetServerPublicKey() - if err != nil { - log.Errorf("failed while getting Management Service public key: %v", err) - return nil, nil, err - } - +func (a *Auth) doMgmLogin(client *mgm.GrpcClient, ctx context.Context, pubSSHKey []byte) error { sysInfo := system.GetInfo(ctx) a.setSystemInfoFlags(sysInfo) - loginResp, err := client.Login(*serverKey, sysInfo, pubSSHKey, a.config.DNSLabels) - return serverKey, loginResp, err + _, err := client.Login(sysInfo, pubSSHKey, a.config.DNSLabels) + return err } // registerPeer checks whether setupKey was provided via cmd line and if not then it prompts user to enter a key. // Otherwise tries to register with the provided setupKey via command line. 
func (a *Auth) registerPeer(client *mgm.GrpcClient, ctx context.Context, setupKey string, jwtToken string, pubSSHKey []byte) (*mgmProto.LoginResponse, error) { - serverPublicKey, err := client.GetServerPublicKey() - if err != nil { - log.Errorf("failed while getting Management Service public key: %v", err) - return nil, err - } - validSetupKey, err := uuid.Parse(setupKey) if err != nil && jwtToken == "" { return nil, status.Errorf(codes.InvalidArgument, "invalid setup-key or no sso information provided, err: %v", err) @@ -322,7 +298,7 @@ func (a *Auth) registerPeer(client *mgm.GrpcClient, ctx context.Context, setupKe log.Debugf("sending peer registration request to Management Service") info := system.GetInfo(ctx) a.setSystemInfoFlags(info) - loginResp, err := client.Register(*serverPublicKey, validSetupKey.String(), jwtToken, info, pubSSHKey, a.config.DNSLabels) + loginResp, err := client.Register(validSetupKey.String(), jwtToken, info, pubSSHKey, a.config.DNSLabels) if err != nil { log.Errorf("failed registering peer %v", err) return nil, err diff --git a/client/internal/connect.go b/client/internal/connect.go index 242b25b44..bc2bd84d9 100644 --- a/client/internal/connect.go +++ b/client/internal/connect.go @@ -44,6 +44,10 @@ import ( "github.com/netbirdio/netbird/version" ) +// androidRunOverride is set on Android to inject mobile dependencies +// when using embed.Client (which calls Run() with empty MobileDependency). +var androidRunOverride func(c *ConnectClient, runningChan chan struct{}, logPath string) error + type ConnectClient struct { ctx context.Context config *profilemanager.Config @@ -76,6 +80,9 @@ func (c *ConnectClient) SetUpdateManager(um *updater.Manager) { // Run with main logic. 
func (c *ConnectClient) Run(runningChan chan struct{}, logPath string) error { + if androidRunOverride != nil { + return androidRunOverride(c, runningChan, logPath) + } return c.run(MobileDependency{}, runningChan, logPath) } @@ -104,6 +111,7 @@ func (c *ConnectClient) RunOniOS( fileDescriptor int32, networkChangeListener listener.NetworkChangeListener, dnsManager dns.IosDnsManager, + dnsAddresses []netip.AddrPort, stateFilePath string, ) error { // Set GC percent to 5% to reduce memory usage as iOS only allows 50MB of memory for the extension. @@ -113,6 +121,7 @@ func (c *ConnectClient) RunOniOS( FileDescriptor: fileDescriptor, NetworkChangeListener: networkChangeListener, DnsManager: dnsManager, + HostDNSAddresses: dnsAddresses, StateFilePath: stateFilePath, } return c.run(mobileDependency, nil, "") @@ -610,12 +619,6 @@ func connectToSignal(ctx context.Context, wtConfig *mgmProto.NetbirdConfig, ourP // loginToManagement creates Management ServiceDependencies client, establishes a connection, logs-in and gets a global Netbird config (signal, turn, stun hosts, etc) func loginToManagement(ctx context.Context, client mgm.Client, pubSSHKey []byte, config *profilemanager.Config) (*mgmProto.LoginResponse, error) { - - serverPublicKey, err := client.GetServerPublicKey() - if err != nil { - return nil, gstatus.Errorf(codes.FailedPrecondition, "failed while getting Management Service public key: %s", err) - } - sysInfo := system.GetInfo(ctx) sysInfo.SetFlags( config.RosenpassEnabled, @@ -634,12 +637,7 @@ func loginToManagement(ctx context.Context, client mgm.Client, pubSSHKey []byte, config.EnableSSHRemotePortForwarding, config.DisableSSHAuth, ) - loginResp, err := client.Login(*serverPublicKey, sysInfo, pubSSHKey, config.DNSLabels) - if err != nil { - return nil, err - } - - return loginResp, nil + return client.Login(sysInfo, pubSSHKey, config.DNSLabels) } func statusRecorderToMgmConnStateNotifier(statusRecorder *peer.Status) mgm.ConnStateNotifier { diff --git 
a/client/internal/connect_android_default.go b/client/internal/connect_android_default.go new file mode 100644 index 000000000..190341c4a --- /dev/null +++ b/client/internal/connect_android_default.go @@ -0,0 +1,73 @@ +//go:build android + +package internal + +import ( + "net/netip" + + "github.com/netbirdio/netbird/client/internal/dns" + "github.com/netbirdio/netbird/client/internal/listener" + "github.com/netbirdio/netbird/client/internal/stdnet" +) + +// noopIFaceDiscover is a stub ExternalIFaceDiscover for embed.Client on Android. +// It returns an empty interface list, which means ICE P2P candidates won't be +// discovered — connections will fall back to relay. Applications that need P2P +// should provide a real implementation via runOnAndroidEmbed that uses +// Android's ConnectivityManager to enumerate network interfaces. +type noopIFaceDiscover struct{} + +func (noopIFaceDiscover) IFaces() (string, error) { + // Return empty JSON array — no local interfaces advertised for ICE. + // This is intentional: without Android's ConnectivityManager, we cannot + // reliably enumerate interfaces (netlink is restricted on Android 11+). + // Relay connections still work; only P2P hole-punching is disabled. + return "[]", nil +} + +// noopNetworkChangeListener is a stub for embed.Client on Android. +// Network change events are ignored since the embed client manages its own +// reconnection logic via the engine's built-in retry mechanism. +type noopNetworkChangeListener struct{} + +func (noopNetworkChangeListener) OnNetworkChanged(string) { + // No-op: embed.Client relies on the engine's internal reconnection + // logic rather than OS-level network change notifications. +} + +func (noopNetworkChangeListener) SetInterfaceIP(string) { + // No-op: in netstack mode, the overlay IP is managed by the userspace + // network stack, not by OS-level interface configuration. +} + +// noopDnsReadyListener is a stub for embed.Client on Android. 
+// DNS readiness notifications are not needed in netstack/embed mode +// since system DNS is disabled and DNS resolution happens externally. +type noopDnsReadyListener struct{} + +func (noopDnsReadyListener) OnReady() { + // No-op: embed.Client does not need DNS readiness notifications. + // System DNS is disabled in netstack mode. +} + +var _ stdnet.ExternalIFaceDiscover = noopIFaceDiscover{} +var _ listener.NetworkChangeListener = noopNetworkChangeListener{} +var _ dns.ReadyListener = noopDnsReadyListener{} + +func init() { + // Wire up the default override so embed.Client.Start() works on Android + // with netstack mode. Provides complete no-op stubs for all mobile + // dependencies so the engine's existing Android code paths work unchanged. + // Applications that need P2P ICE or real DNS should replace this by + // setting androidRunOverride before calling Start(). + androidRunOverride = func(c *ConnectClient, runningChan chan struct{}, logPath string) error { + return c.runOnAndroidEmbed( + noopIFaceDiscover{}, + noopNetworkChangeListener{}, + []netip.AddrPort{}, + noopDnsReadyListener{}, + runningChan, + logPath, + ) + } +} diff --git a/client/internal/connect_android_embed.go b/client/internal/connect_android_embed.go new file mode 100644 index 000000000..18f72e841 --- /dev/null +++ b/client/internal/connect_android_embed.go @@ -0,0 +1,32 @@ +//go:build android + +package internal + +import ( + "net/netip" + + "github.com/netbirdio/netbird/client/internal/dns" + "github.com/netbirdio/netbird/client/internal/listener" + "github.com/netbirdio/netbird/client/internal/stdnet" +) + +// runOnAndroidEmbed is like RunOnAndroid but accepts a runningChan +// so embed.Client.Start() can detect when the engine is ready. +// It provides complete MobileDependency so the engine's existing +// Android code paths work unchanged. 
+func (c *ConnectClient) runOnAndroidEmbed( + iFaceDiscover stdnet.ExternalIFaceDiscover, + networkChangeListener listener.NetworkChangeListener, + dnsAddresses []netip.AddrPort, + dnsReadyListener dns.ReadyListener, + runningChan chan struct{}, + logPath string, +) error { + mobileDependency := MobileDependency{ + IFaceDiscover: iFaceDiscover, + NetworkChangeListener: networkChangeListener, + HostDNSAddresses: dnsAddresses, + DnsReadyListener: dnsReadyListener, + } + return c.run(mobileDependency, runningChan, logPath) +} diff --git a/client/internal/debug/debug.go b/client/internal/debug/debug.go index c9ebf25e5..6a8eae324 100644 --- a/client/internal/debug/debug.go +++ b/client/internal/debug/debug.go @@ -25,6 +25,7 @@ import ( "google.golang.org/protobuf/encoding/protojson" "github.com/netbirdio/netbird/client/anonymize" + "github.com/netbirdio/netbird/client/configs" "github.com/netbirdio/netbird/client/internal/peer" "github.com/netbirdio/netbird/client/internal/profilemanager" "github.com/netbirdio/netbird/client/internal/updater/installer" @@ -52,6 +53,7 @@ resolved_domains.txt: Anonymized resolved domain IP addresses from the status re config.txt: Anonymized configuration information of the NetBird client. network_map.json: Anonymized sync response containing peer configurations, routes, DNS settings, and firewall rules. state.json: Anonymized client state dump containing netbird states for the active profile. +service_params.json: Sanitized service install parameters (service.json). Sensitive environment variable values are masked. Only present when service.json exists. metrics.txt: Buffered client metrics in InfluxDB line protocol format. Only present when metrics collection is enabled. Peer identifiers are anonymized. mutex.prof: Mutex profiling information. goroutine.prof: Goroutine profiling information. 
@@ -359,6 +361,10 @@ func (g *BundleGenerator) createArchive() error { log.Errorf("failed to add corrupted state files to debug bundle: %v", err) } + if err := g.addServiceParams(); err != nil { + log.Errorf("failed to add service params to debug bundle: %v", err) + } + if err := g.addMetrics(); err != nil { log.Errorf("failed to add metrics to debug bundle: %v", err) } @@ -488,6 +494,90 @@ func (g *BundleGenerator) addConfig() error { return nil } +const ( + serviceParamsFile = "service.json" + serviceParamsBundle = "service_params.json" + maskedValue = "***" + envVarPrefix = "NB_" + jsonKeyManagementURL = "management_url" + jsonKeyServiceEnv = "service_env_vars" +) + +var sensitiveEnvSubstrings = []string{"key", "token", "secret", "password", "credential"} + +// addServiceParams reads the service.json file and adds a sanitized version to the bundle. +// Non-NB_ env vars and vars with sensitive names are masked. Other NB_ values are anonymized. +func (g *BundleGenerator) addServiceParams() error { + path := filepath.Join(configs.StateDir, serviceParamsFile) + + data, err := os.ReadFile(path) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return fmt.Errorf("read service params: %w", err) + } + + var params map[string]any + if err := json.Unmarshal(data, ¶ms); err != nil { + return fmt.Errorf("parse service params: %w", err) + } + + if g.anonymize { + if mgmtURL, ok := params[jsonKeyManagementURL].(string); ok && mgmtURL != "" { + params[jsonKeyManagementURL] = g.anonymizer.AnonymizeURI(mgmtURL) + } + } + + g.sanitizeServiceEnvVars(params) + + sanitizedData, err := json.MarshalIndent(params, "", " ") + if err != nil { + return fmt.Errorf("marshal sanitized service params: %w", err) + } + + if err := g.addFileToZip(bytes.NewReader(sanitizedData), serviceParamsBundle); err != nil { + return fmt.Errorf("add service params to zip: %w", err) + } + + return nil +} + +// sanitizeServiceEnvVars masks or anonymizes env var values in service params. 
+// Non-NB_ vars and vars with sensitive names (key, token, etc.) are fully masked. +// Other NB_ var values are passed through the anonymizer when anonymization is enabled. +func (g *BundleGenerator) sanitizeServiceEnvVars(params map[string]any) { + envVars, ok := params[jsonKeyServiceEnv].(map[string]any) + if !ok { + return + } + + sanitized := make(map[string]any, len(envVars)) + for k, v := range envVars { + val, _ := v.(string) + switch { + case !strings.HasPrefix(k, envVarPrefix) || isSensitiveEnvVar(k): + sanitized[k] = maskedValue + case g.anonymize: + sanitized[k] = g.anonymizer.AnonymizeString(val) + default: + sanitized[k] = val + } + } + params[jsonKeyServiceEnv] = sanitized +} + +// isSensitiveEnvVar returns true for env var names that may contain secrets. +func isSensitiveEnvVar(key string) bool { + lower := strings.ToLower(key) + for _, s := range sensitiveEnvSubstrings { + if strings.Contains(lower, s) { + return true + } + } + return false +} + func (g *BundleGenerator) addCommonConfigFields(configContent *strings.Builder) { configContent.WriteString("NetBird Client Configuration:\n\n") diff --git a/client/internal/debug/debug_test.go b/client/internal/debug/debug_test.go index 59837c328..6b5bb911c 100644 --- a/client/internal/debug/debug_test.go +++ b/client/internal/debug/debug_test.go @@ -1,8 +1,12 @@ package debug import ( + "archive/zip" + "bytes" "encoding/json" "net" + "os" + "path/filepath" "strings" "testing" @@ -10,6 +14,7 @@ import ( "github.com/stretchr/testify/require" "github.com/netbirdio/netbird/client/anonymize" + "github.com/netbirdio/netbird/client/configs" mgmProto "github.com/netbirdio/netbird/shared/management/proto" ) @@ -420,6 +425,226 @@ func TestAnonymizeNetworkMap(t *testing.T) { } } +func TestIsSensitiveEnvVar(t *testing.T) { + tests := []struct { + key string + sensitive bool + }{ + {"NB_SETUP_KEY", true}, + {"NB_API_TOKEN", true}, + {"NB_CLIENT_SECRET", true}, + {"NB_PASSWORD", true}, + {"NB_CREDENTIAL", true}, + 
{"NB_LOG_LEVEL", false}, + {"NB_MANAGEMENT_URL", false}, + {"NB_HOSTNAME", false}, + {"HOME", false}, + {"PATH", false}, + } + for _, tt := range tests { + t.Run(tt.key, func(t *testing.T) { + assert.Equal(t, tt.sensitive, isSensitiveEnvVar(tt.key)) + }) + } +} + +func TestSanitizeServiceEnvVars(t *testing.T) { + tests := []struct { + name string + anonymize bool + input map[string]any + check func(t *testing.T, params map[string]any) + }{ + { + name: "no env vars key", + anonymize: false, + input: map[string]any{"management_url": "https://mgmt.example.com"}, + check: func(t *testing.T, params map[string]any) { + t.Helper() + assert.Equal(t, "https://mgmt.example.com", params["management_url"], "non-env fields should be untouched") + _, ok := params[jsonKeyServiceEnv] + assert.False(t, ok, "service_env_vars should not be added") + }, + }, + { + name: "non-NB vars are masked", + anonymize: false, + input: map[string]any{ + jsonKeyServiceEnv: map[string]any{ + "HOME": "/root", + "PATH": "/usr/bin", + "NB_LOG_LEVEL": "debug", + }, + }, + check: func(t *testing.T, params map[string]any) { + t.Helper() + env := params[jsonKeyServiceEnv].(map[string]any) + assert.Equal(t, maskedValue, env["HOME"], "non-NB_ var should be masked") + assert.Equal(t, maskedValue, env["PATH"], "non-NB_ var should be masked") + assert.Equal(t, "debug", env["NB_LOG_LEVEL"], "safe NB_ var should pass through") + }, + }, + { + name: "sensitive NB vars are masked", + anonymize: false, + input: map[string]any{ + jsonKeyServiceEnv: map[string]any{ + "NB_SETUP_KEY": "abc123", + "NB_API_TOKEN": "tok_xyz", + "NB_LOG_LEVEL": "info", + }, + }, + check: func(t *testing.T, params map[string]any) { + t.Helper() + env := params[jsonKeyServiceEnv].(map[string]any) + assert.Equal(t, maskedValue, env["NB_SETUP_KEY"], "sensitive NB_ var should be masked") + assert.Equal(t, maskedValue, env["NB_API_TOKEN"], "sensitive NB_ var should be masked") + assert.Equal(t, "info", env["NB_LOG_LEVEL"], "safe NB_ var should 
pass through") + }, + }, + { + name: "safe NB vars anonymized when anonymize is true", + anonymize: true, + input: map[string]any{ + jsonKeyServiceEnv: map[string]any{ + "NB_MANAGEMENT_URL": "https://mgmt.example.com:443", + "NB_LOG_LEVEL": "debug", + "NB_SETUP_KEY": "secret", + "SOME_OTHER": "val", + }, + }, + check: func(t *testing.T, params map[string]any) { + t.Helper() + env := params[jsonKeyServiceEnv].(map[string]any) + // Safe NB_ values should be anonymized (not the original, not masked) + mgmtVal := env["NB_MANAGEMENT_URL"].(string) + assert.NotEqual(t, "https://mgmt.example.com:443", mgmtVal, "should be anonymized") + assert.NotEqual(t, maskedValue, mgmtVal, "should not be masked") + + logVal := env["NB_LOG_LEVEL"].(string) + assert.NotEqual(t, maskedValue, logVal, "safe NB_ var should not be masked") + + // Sensitive and non-NB_ still masked + assert.Equal(t, maskedValue, env["NB_SETUP_KEY"]) + assert.Equal(t, maskedValue, env["SOME_OTHER"]) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + anonymizer := anonymize.NewAnonymizer(anonymize.DefaultAddresses()) + g := &BundleGenerator{ + anonymize: tt.anonymize, + anonymizer: anonymizer, + } + g.sanitizeServiceEnvVars(tt.input) + tt.check(t, tt.input) + }) + } +} + +func TestAddServiceParams(t *testing.T) { + t.Run("missing service.json returns nil", func(t *testing.T) { + g := &BundleGenerator{ + anonymizer: anonymize.NewAnonymizer(anonymize.DefaultAddresses()), + } + + origStateDir := configs.StateDir + configs.StateDir = t.TempDir() + t.Cleanup(func() { configs.StateDir = origStateDir }) + + err := g.addServiceParams() + assert.NoError(t, err) + }) + + t.Run("management_url anonymized when anonymize is true", func(t *testing.T) { + dir := t.TempDir() + origStateDir := configs.StateDir + configs.StateDir = dir + t.Cleanup(func() { configs.StateDir = origStateDir }) + + input := map[string]any{ + jsonKeyManagementURL: "https://api.example.com:443", + jsonKeyServiceEnv: 
map[string]any{ + "NB_LOG_LEVEL": "trace", + }, + } + data, err := json.Marshal(input) + require.NoError(t, err) + require.NoError(t, os.WriteFile(filepath.Join(dir, serviceParamsFile), data, 0600)) + + var buf bytes.Buffer + zw := zip.NewWriter(&buf) + + g := &BundleGenerator{ + anonymize: true, + anonymizer: anonymize.NewAnonymizer(anonymize.DefaultAddresses()), + archive: zw, + } + + require.NoError(t, g.addServiceParams()) + require.NoError(t, zw.Close()) + + zr, err := zip.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len())) + require.NoError(t, err) + require.Len(t, zr.File, 1) + assert.Equal(t, serviceParamsBundle, zr.File[0].Name) + + rc, err := zr.File[0].Open() + require.NoError(t, err) + defer rc.Close() + + var result map[string]any + require.NoError(t, json.NewDecoder(rc).Decode(&result)) + + mgmt := result[jsonKeyManagementURL].(string) + assert.NotEqual(t, "https://api.example.com:443", mgmt, "management_url should be anonymized") + assert.NotEmpty(t, mgmt) + + env := result[jsonKeyServiceEnv].(map[string]any) + assert.NotEqual(t, maskedValue, env["NB_LOG_LEVEL"], "safe NB_ var should not be masked") + }) + + t.Run("management_url preserved when anonymize is false", func(t *testing.T) { + dir := t.TempDir() + origStateDir := configs.StateDir + configs.StateDir = dir + t.Cleanup(func() { configs.StateDir = origStateDir }) + + input := map[string]any{ + jsonKeyManagementURL: "https://api.example.com:443", + } + data, err := json.Marshal(input) + require.NoError(t, err) + require.NoError(t, os.WriteFile(filepath.Join(dir, serviceParamsFile), data, 0600)) + + var buf bytes.Buffer + zw := zip.NewWriter(&buf) + + g := &BundleGenerator{ + anonymize: false, + anonymizer: anonymize.NewAnonymizer(anonymize.DefaultAddresses()), + archive: zw, + } + + require.NoError(t, g.addServiceParams()) + require.NoError(t, zw.Close()) + + zr, err := zip.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len())) + require.NoError(t, err) + + rc, err := zr.File[0].Open() 
+ require.NoError(t, err) + defer rc.Close() + + var result map[string]any + require.NoError(t, json.NewDecoder(rc).Decode(&result)) + + assert.Equal(t, "https://api.example.com:443", result[jsonKeyManagementURL], "management_url should be preserved") + }) +} + // Helper function to check if IP is in CGNAT range func isInCGNATRange(ip net.IP) bool { cgnat := net.IPNet{ diff --git a/client/internal/dns/handler_chain.go b/client/internal/dns/handler_chain.go index 06a2056b1..6fbdedc59 100644 --- a/client/internal/dns/handler_chain.go +++ b/client/internal/dns/handler_chain.go @@ -73,6 +73,9 @@ func (w *ResponseWriterChain) WriteMsg(m *dns.Msg) error { return nil } w.response = m + if m.MsgHdr.Truncated { + w.SetMeta("truncated", "true") + } return w.ResponseWriter.WriteMsg(m) } @@ -195,10 +198,14 @@ func (c *HandlerChain) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { startTime := time.Now() requestID := resutil.GenerateRequestID() - logger := log.WithFields(log.Fields{ + fields := log.Fields{ "request_id": requestID, "dns_id": fmt.Sprintf("%04x", r.Id), - }) + } + if addr := w.RemoteAddr(); addr != nil { + fields["client"] = addr.String() + } + logger := log.WithFields(fields) question := r.Question[0] qname := strings.ToLower(question.Name) @@ -261,9 +268,9 @@ func (c *HandlerChain) logResponse(logger *log.Entry, cw *ResponseWriterChain, q meta += " " + k + "=" + v } - logger.Tracef("response: domain=%s rcode=%s answers=%s%s took=%s", + logger.Tracef("response: domain=%s rcode=%s answers=%s size=%dB%s took=%s", qname, dns.RcodeToString[cw.response.Rcode], resutil.FormatAnswers(cw.response.Answer), - meta, time.Since(startTime)) + cw.response.Len(), meta, time.Since(startTime)) } func (c *HandlerChain) isHandlerMatch(qname string, entry HandlerEntry) bool { diff --git a/client/internal/dns/local/local_test.go b/client/internal/dns/local/local_test.go index 73f70035f..2c6b7dbc3 100644 --- a/client/internal/dns/local/local_test.go +++ 
b/client/internal/dns/local/local_test.go @@ -1263,9 +1263,9 @@ func TestLocalResolver_AuthoritativeFlag(t *testing.T) { }) } -// TestLocalResolver_Stop tests cleanup on Stop +// TestLocalResolver_Stop tests cleanup on GracefullyStop func TestLocalResolver_Stop(t *testing.T) { - t.Run("Stop clears all state", func(t *testing.T) { + t.Run("GracefullyStop clears all state", func(t *testing.T) { resolver := NewResolver() resolver.Update([]nbdns.CustomZone{{ Domain: "example.com.", @@ -1285,7 +1285,7 @@ func TestLocalResolver_Stop(t *testing.T) { assert.False(t, resolver.isInManagedZone("host.example.com.")) }) - t.Run("Stop is safe to call multiple times", func(t *testing.T) { + t.Run("GracefullyStop is safe to call multiple times", func(t *testing.T) { resolver := NewResolver() resolver.Update([]nbdns.CustomZone{{ Domain: "example.com.", @@ -1299,7 +1299,7 @@ func TestLocalResolver_Stop(t *testing.T) { resolver.Stop() }) - t.Run("Stop cancels in-flight external resolution", func(t *testing.T) { + t.Run("GracefullyStop cancels in-flight external resolution", func(t *testing.T) { resolver := NewResolver() lookupStarted := make(chan struct{}) diff --git a/client/internal/dns/mock_server.go b/client/internal/dns/mock_server.go index fe160e20a..548b1f54f 100644 --- a/client/internal/dns/mock_server.go +++ b/client/internal/dns/mock_server.go @@ -85,6 +85,16 @@ func (m *MockServer) PopulateManagementDomain(mgmtURL *url.URL) error { return nil } +// SetRouteChecker mock implementation of SetRouteChecker from Server interface +func (m *MockServer) SetRouteChecker(func(netip.Addr) bool) { + // Mock implementation - no-op +} + +// SetFirewall mock implementation of SetFirewall from Server interface +func (m *MockServer) SetFirewall(Firewall) { + // Mock implementation - no-op +} + // BeginBatch mock implementation of BeginBatch from Server interface func (m *MockServer) BeginBatch() { // Mock implementation - no-op diff --git a/client/internal/dns/response_writer.go 
b/client/internal/dns/response_writer.go index edc65a5d9..287cf28b0 100644 --- a/client/internal/dns/response_writer.go +++ b/client/internal/dns/response_writer.go @@ -104,3 +104,23 @@ func (r *responseWriter) TsigTimersOnly(bool) { // After a call to Hijack(), the DNS package will not do anything with the connection. func (r *responseWriter) Hijack() { } + +// remoteAddrFromPacket extracts the source IP:port from a decoded packet for logging. +func remoteAddrFromPacket(packet gopacket.Packet) *net.UDPAddr { + var srcIP net.IP + if ipv4 := packet.Layer(layers.LayerTypeIPv4); ipv4 != nil { + srcIP = ipv4.(*layers.IPv4).SrcIP + } else if ipv6 := packet.Layer(layers.LayerTypeIPv6); ipv6 != nil { + srcIP = ipv6.(*layers.IPv6).SrcIP + } + + var srcPort int + if udp := packet.Layer(layers.LayerTypeUDP); udp != nil { + srcPort = int(udp.(*layers.UDP).SrcPort) + } + + if srcIP == nil { + return nil + } + return &net.UDPAddr{IP: srcIP, Port: srcPort} +} diff --git a/client/internal/dns/server.go b/client/internal/dns/server.go index 6ca4f7957..f7865047b 100644 --- a/client/internal/dns/server.go +++ b/client/internal/dns/server.go @@ -57,6 +57,8 @@ type Server interface { ProbeAvailability() UpdateServerConfig(domains dnsconfig.ServerDomains) error PopulateManagementDomain(mgmtURL *url.URL) error + SetRouteChecker(func(netip.Addr) bool) + SetFirewall(Firewall) } type nsGroupsByDomain struct { @@ -104,6 +106,7 @@ type DefaultServer struct { statusRecorder *peer.Status stateManager *statemanager.Manager + routeMatch func(netip.Addr) bool probeMu sync.Mutex probeCancel context.CancelFunc @@ -149,7 +152,7 @@ func NewDefaultServer(ctx context.Context, config DefaultServerConfig) (*Default if config.WgInterface.IsUserspaceBind() { dnsService = NewServiceViaMemory(config.WgInterface) } else { - dnsService = newServiceViaListener(config.WgInterface, addrPort) + dnsService = newServiceViaListener(config.WgInterface, addrPort, nil) } server := newDefaultServer(ctx, 
config.WgInterface, dnsService, config.StatusRecorder, config.StateManager, config.DisableSys) @@ -184,11 +187,16 @@ func NewDefaultServerIos( ctx context.Context, wgInterface WGIface, iosDnsManager IosDnsManager, + hostsDnsList []netip.AddrPort, statusRecorder *peer.Status, disableSys bool, ) *DefaultServer { + log.Debugf("iOS host dns address list is: %v", hostsDnsList) ds := newDefaultServer(ctx, wgInterface, NewServiceViaMemory(wgInterface), statusRecorder, nil, disableSys) ds.iosDnsManager = iosDnsManager + ds.hostsDNSHolder.set(hostsDnsList) + ds.permanent = true + ds.addHostRootZone() return ds } @@ -229,6 +237,14 @@ func newDefaultServer( return defaultServer } +// SetRouteChecker sets the function used by upstream resolvers to determine +// whether an IP is routed through the tunnel. +func (s *DefaultServer) SetRouteChecker(f func(netip.Addr) bool) { + s.mux.Lock() + defer s.mux.Unlock() + s.routeMatch = f +} + // RegisterHandler registers a handler for the given domains with the given priority. // Any previously registered handler for the same domain and priority will be replaced. func (s *DefaultServer) RegisterHandler(domains domain.List, handler dns.Handler, priority int) { @@ -364,6 +380,17 @@ func (s *DefaultServer) DnsIP() netip.Addr { return s.service.RuntimeIP() } +// SetFirewall sets the firewall used for DNS port DNAT rules. +// This must be called before Initialize when using the listener-based service, +// because the firewall is typically not available at construction time. 
+func (s *DefaultServer) SetFirewall(fw Firewall) { + if svc, ok := s.service.(*serviceViaListener); ok { + svc.listenerFlagLock.Lock() + svc.firewall = fw + svc.listenerFlagLock.Unlock() + } +} + // Stop stops the server func (s *DefaultServer) Stop() { s.probeMu.Lock() @@ -385,8 +412,12 @@ func (s *DefaultServer) Stop() { maps.Clear(s.extraDomains) } -func (s *DefaultServer) disableDNS() error { - defer s.service.Stop() +func (s *DefaultServer) disableDNS() (retErr error) { + defer func() { + if err := s.service.Stop(); err != nil { + retErr = errors.Join(retErr, fmt.Errorf("stop DNS service: %w", err)) + } + }() if s.isUsingNoopHostManager() { return nil @@ -743,6 +774,7 @@ func (s *DefaultServer) registerFallback(config HostDNSConfig) { log.Errorf("failed to create upstream resolver for original nameservers: %v", err) return } + handler.routeMatch = s.routeMatch for _, ns := range originalNameservers { if ns == config.ServerIP { @@ -852,6 +884,7 @@ func (s *DefaultServer) createHandlersForDomainGroup(domainGroup nsGroupsByDomai if err != nil { return nil, fmt.Errorf("create upstream resolver: %v", err) } + handler.routeMatch = s.routeMatch for _, ns := range nsGroup.NameServers { if ns.NSType != nbdns.UDPNameServerType { @@ -1036,6 +1069,7 @@ func (s *DefaultServer) addHostRootZone() { log.Errorf("unable to create a new upstream resolver, error: %v", err) return } + handler.routeMatch = s.routeMatch handler.upstreamServers = maps.Keys(hostDNSServers) handler.deactivate = func(error) {} diff --git a/client/internal/dns/server_test.go b/client/internal/dns/server_test.go index d3b0c250d..f77f6e898 100644 --- a/client/internal/dns/server_test.go +++ b/client/internal/dns/server_test.go @@ -476,8 +476,8 @@ func TestDNSFakeResolverHandleUpdates(t *testing.T) { packetfilter := pfmock.NewMockPacketFilter(ctrl) packetfilter.EXPECT().FilterOutbound(gomock.Any(), gomock.Any()).AnyTimes() - packetfilter.EXPECT().AddUDPPacketHook(gomock.Any(), gomock.Any(), gomock.Any(), 
gomock.Any()) - packetfilter.EXPECT().RemovePacketHook(gomock.Any()) + packetfilter.EXPECT().SetUDPPacketHook(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + packetfilter.EXPECT().SetTCPPacketHook(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() if err := wgIface.SetFilter(packetfilter); err != nil { t.Errorf("set packet filter: %v", err) @@ -1071,7 +1071,7 @@ func (m *mockHandler) ID() types.HandlerID { return types.Hand type mockService struct{} func (m *mockService) Listen() error { return nil } -func (m *mockService) Stop() {} +func (m *mockService) Stop() error { return nil } func (m *mockService) RuntimeIP() netip.Addr { return netip.MustParseAddr("127.0.0.1") } func (m *mockService) RuntimePort() int { return 53 } func (m *mockService) RegisterMux(string, dns.Handler) {} diff --git a/client/internal/dns/service.go b/client/internal/dns/service.go index 6a76c53e3..1c6ce7849 100644 --- a/client/internal/dns/service.go +++ b/client/internal/dns/service.go @@ -4,15 +4,25 @@ import ( "net/netip" "github.com/miekg/dns" + + firewall "github.com/netbirdio/netbird/client/firewall/manager" ) const ( DefaultPort = 53 ) +// Firewall provides DNAT capabilities for DNS port redirection. +// This is used when the DNS server cannot bind port 53 directly +// and needs firewall rules to redirect traffic. 
+type Firewall interface { + AddOutputDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error + RemoveOutputDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error +} + type service interface { Listen() error - Stop() + Stop() error RegisterMux(domain string, handler dns.Handler) DeregisterMux(key string) RuntimePort() int diff --git a/client/internal/dns/service_listener.go b/client/internal/dns/service_listener.go index f7ddfd40f..4e09f1b7f 100644 --- a/client/internal/dns/service_listener.go +++ b/client/internal/dns/service_listener.go @@ -10,9 +10,13 @@ import ( "sync" "time" + "github.com/hashicorp/go-multierror" "github.com/miekg/dns" log "github.com/sirupsen/logrus" + nberrors "github.com/netbirdio/netbird/client/errors" + + firewall "github.com/netbirdio/netbird/client/firewall/manager" "github.com/netbirdio/netbird/client/internal/ebpf" ebpfMgr "github.com/netbirdio/netbird/client/internal/ebpf/manager" ) @@ -31,25 +35,33 @@ type serviceViaListener struct { dnsMux *dns.ServeMux customAddr *netip.AddrPort server *dns.Server + tcpServer *dns.Server listenIP netip.Addr listenPort uint16 listenerIsRunning bool listenerFlagLock sync.Mutex ebpfService ebpfMgr.Manager + firewall Firewall + tcpDNATConfigured bool } -func newServiceViaListener(wgIface WGIface, customAddr *netip.AddrPort) *serviceViaListener { +func newServiceViaListener(wgIface WGIface, customAddr *netip.AddrPort, fw Firewall) *serviceViaListener { mux := dns.NewServeMux() s := &serviceViaListener{ wgInterface: wgIface, dnsMux: mux, customAddr: customAddr, + firewall: fw, server: &dns.Server{ Net: "udp", Handler: mux, UDPSize: 65535, }, + tcpServer: &dns.Server{ + Net: "tcp", + Handler: mux, + }, } return s @@ -70,43 +82,86 @@ func (s *serviceViaListener) Listen() error { return fmt.Errorf("eval listen address: %w", err) } s.listenIP = s.listenIP.Unmap() - s.server.Addr = net.JoinHostPort(s.listenIP.String(), 
strconv.Itoa(int(s.listenPort))) - log.Debugf("starting dns on %s", s.server.Addr) - go func() { - s.setListenerStatus(true) - defer s.setListenerStatus(false) + addr := net.JoinHostPort(s.listenIP.String(), strconv.Itoa(int(s.listenPort))) + s.server.Addr = addr + s.tcpServer.Addr = addr - err := s.server.ListenAndServe() - if err != nil { - log.Errorf("dns server running with %d port returned an error: %v. Will not retry", s.listenPort, err) + log.Debugf("starting dns on %s (UDP + TCP)", addr) + s.listenerIsRunning = true + + go func() { + if err := s.server.ListenAndServe(); err != nil { + log.Errorf("failed to run DNS UDP server on port %d: %v", s.listenPort, err) + } + + s.listenerFlagLock.Lock() + unexpected := s.listenerIsRunning + s.listenerIsRunning = false + s.listenerFlagLock.Unlock() + + if unexpected { + if err := s.tcpServer.Shutdown(); err != nil { + log.Debugf("failed to shutdown DNS TCP server: %v", err) + } } }() + go func() { + if err := s.tcpServer.ListenAndServe(); err != nil { + log.Errorf("failed to run DNS TCP server on port %d: %v", s.listenPort, err) + } + }() + + // When eBPF redirects UDP port 53 to our listen port, TCP still needs + // a DNAT rule because eBPF only handles UDP. 
+ if s.ebpfService != nil && s.firewall != nil && s.listenPort != DefaultPort { + if err := s.firewall.AddOutputDNAT(s.listenIP, firewall.ProtocolTCP, DefaultPort, s.listenPort); err != nil { + log.Warnf("failed to add DNS TCP DNAT rule, TCP DNS on port 53 will not work: %v", err) + } else { + s.tcpDNATConfigured = true + log.Infof("added DNS TCP DNAT rule: %s:%d -> %s:%d", s.listenIP, DefaultPort, s.listenIP, s.listenPort) + } + } + return nil } -func (s *serviceViaListener) Stop() { +func (s *serviceViaListener) Stop() error { s.listenerFlagLock.Lock() defer s.listenerFlagLock.Unlock() if !s.listenerIsRunning { - return + return nil } + s.listenerIsRunning = false ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - err := s.server.ShutdownContext(ctx) - if err != nil { - log.Errorf("stopping dns server listener returned an error: %v", err) + var merr *multierror.Error + + if err := s.server.ShutdownContext(ctx); err != nil { + merr = multierror.Append(merr, fmt.Errorf("stop DNS UDP server: %w", err)) + } + + if err := s.tcpServer.ShutdownContext(ctx); err != nil { + merr = multierror.Append(merr, fmt.Errorf("stop DNS TCP server: %w", err)) + } + + if s.tcpDNATConfigured && s.firewall != nil { + if err := s.firewall.RemoveOutputDNAT(s.listenIP, firewall.ProtocolTCP, DefaultPort, s.listenPort); err != nil { + merr = multierror.Append(merr, fmt.Errorf("remove DNS TCP DNAT rule: %w", err)) + } + s.tcpDNATConfigured = false } if s.ebpfService != nil { - err = s.ebpfService.FreeDNSFwd() - if err != nil { - log.Errorf("stopping traffic forwarder returned an error: %v", err) + if err := s.ebpfService.FreeDNSFwd(); err != nil { + merr = multierror.Append(merr, fmt.Errorf("stop traffic forwarder: %w", err)) } } + + return nberrors.FormatErrorOrNil(merr) } func (s *serviceViaListener) RegisterMux(pattern string, handler dns.Handler) { @@ -133,12 +188,6 @@ func (s *serviceViaListener) RuntimeIP() netip.Addr { return s.listenIP } -func (s 
*serviceViaListener) setListenerStatus(running bool) { - s.listenerFlagLock.Lock() - defer s.listenerFlagLock.Unlock() - - s.listenerIsRunning = running -} // evalListenAddress figure out the listen address for the DNS server // first check the 53 port availability on WG interface or lo, if not success @@ -187,18 +236,28 @@ func (s *serviceViaListener) testFreePort(port int) (netip.Addr, bool) { } func (s *serviceViaListener) tryToBind(ip netip.Addr, port int) bool { - addrString := net.JoinHostPort(ip.String(), strconv.Itoa(port)) - udpAddr := net.UDPAddrFromAddrPort(netip.MustParseAddrPort(addrString)) - probeListener, err := net.ListenUDP("udp", udpAddr) + addrPort := netip.AddrPortFrom(ip, uint16(port)) + + udpAddr := net.UDPAddrFromAddrPort(addrPort) + udpLn, err := net.ListenUDP("udp", udpAddr) if err != nil { - log.Warnf("binding dns on %s is not available, error: %s", addrString, err) + log.Warnf("binding dns UDP on %s is not available: %s", addrPort, err) return false } - - err = probeListener.Close() - if err != nil { - log.Errorf("got an error closing the probe listener, error: %s", err) + if err := udpLn.Close(); err != nil { + log.Debugf("close UDP probe listener: %s", err) } + + tcpAddr := net.TCPAddrFromAddrPort(addrPort) + tcpLn, err := net.ListenTCP("tcp", tcpAddr) + if err != nil { + log.Warnf("binding dns TCP on %s is not available: %s", addrPort, err) + return false + } + if err := tcpLn.Close(); err != nil { + log.Debugf("close TCP probe listener: %s", err) + } + return true } diff --git a/client/internal/dns/service_listener_test.go b/client/internal/dns/service_listener_test.go new file mode 100644 index 000000000..90ef71d19 --- /dev/null +++ b/client/internal/dns/service_listener_test.go @@ -0,0 +1,86 @@ +package dns + +import ( + "fmt" + "net" + "net/netip" + "testing" + "time" + + "github.com/miekg/dns" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestServiceViaListener_TCPAndUDP(t *testing.T) { 
+ handler := dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) { + m := new(dns.Msg) + m.SetReply(r) + m.Answer = append(m.Answer, &dns.A{ + Hdr: dns.RR_Header{Name: r.Question[0].Name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 60}, + A: net.ParseIP("192.0.2.1"), + }) + if err := w.WriteMsg(m); err != nil { + t.Logf("write msg: %v", err) + } + }) + + // Create a service using a custom address to avoid needing root + svc := newServiceViaListener(nil, nil, nil) + svc.dnsMux.Handle(".", handler) + + // Bind both transports up front to avoid TOCTOU races. + udpAddr := net.UDPAddrFromAddrPort(netip.AddrPortFrom(customIP, 0)) + udpConn, err := net.ListenUDP("udp", udpAddr) + if err != nil { + t.Skip("cannot bind to 127.0.0.153, skipping") + } + port := uint16(udpConn.LocalAddr().(*net.UDPAddr).Port) + + tcpAddr := net.TCPAddrFromAddrPort(netip.AddrPortFrom(customIP, port)) + tcpLn, err := net.ListenTCP("tcp", tcpAddr) + if err != nil { + udpConn.Close() + t.Skip("cannot bind TCP on same port, skipping") + } + + addr := fmt.Sprintf("%s:%d", customIP, port) + svc.server.PacketConn = udpConn + svc.tcpServer.Listener = tcpLn + svc.listenIP = customIP + svc.listenPort = port + + go func() { + if err := svc.server.ActivateAndServe(); err != nil { + t.Logf("udp server: %v", err) + } + }() + go func() { + if err := svc.tcpServer.ActivateAndServe(); err != nil { + t.Logf("tcp server: %v", err) + } + }() + svc.listenerIsRunning = true + + defer func() { + require.NoError(t, svc.Stop()) + }() + + q := new(dns.Msg).SetQuestion("example.com.", dns.TypeA) + + // Test UDP query + udpClient := &dns.Client{Net: "udp", Timeout: 2 * time.Second} + udpResp, _, err := udpClient.Exchange(q, addr) + require.NoError(t, err, "UDP query should succeed") + require.NotNil(t, udpResp) + require.NotEmpty(t, udpResp.Answer) + assert.Contains(t, udpResp.Answer[0].String(), "192.0.2.1", "UDP response should contain expected IP") + + // Test TCP query + tcpClient := &dns.Client{Net: "tcp", 
Timeout: 2 * time.Second} + tcpResp, _, err := tcpClient.Exchange(q, addr) + require.NoError(t, err, "TCP query should succeed") + require.NotNil(t, tcpResp) + require.NotEmpty(t, tcpResp.Answer) + assert.Contains(t, tcpResp.Answer[0].String(), "192.0.2.1", "TCP response should contain expected IP") +} diff --git a/client/internal/dns/service_memory.go b/client/internal/dns/service_memory.go index 6ef0ab526..e8c036076 100644 --- a/client/internal/dns/service_memory.go +++ b/client/internal/dns/service_memory.go @@ -1,6 +1,7 @@ package dns import ( + "errors" "fmt" "net/netip" "sync" @@ -10,6 +11,7 @@ import ( "github.com/miekg/dns" log "github.com/sirupsen/logrus" + "github.com/netbirdio/netbird/client/iface" nbnet "github.com/netbirdio/netbird/client/net" ) @@ -18,7 +20,8 @@ type ServiceViaMemory struct { dnsMux *dns.ServeMux runtimeIP netip.Addr runtimePort int - udpFilterHookID string + tcpDNS *tcpDNSServer + tcpHookSet bool listenerIsRunning bool listenerFlagLock sync.Mutex } @@ -28,14 +31,13 @@ func NewServiceViaMemory(wgIface WGIface) *ServiceViaMemory { if err != nil { log.Errorf("get last ip from network: %v", err) } - s := &ServiceViaMemory{ + + return &ServiceViaMemory{ wgInterface: wgIface, dnsMux: dns.NewServeMux(), - runtimeIP: lastIP, runtimePort: DefaultPort, } - return s } func (s *ServiceViaMemory) Listen() error { @@ -46,10 +48,8 @@ func (s *ServiceViaMemory) Listen() error { return nil } - var err error - s.udpFilterHookID, err = s.filterDNSTraffic() - if err != nil { - return fmt.Errorf("filter dns traffice: %w", err) + if err := s.filterDNSTraffic(); err != nil { + return fmt.Errorf("filter dns traffic: %w", err) } s.listenerIsRunning = true @@ -57,19 +57,29 @@ func (s *ServiceViaMemory) Listen() error { return nil } -func (s *ServiceViaMemory) Stop() { +func (s *ServiceViaMemory) Stop() error { s.listenerFlagLock.Lock() defer s.listenerFlagLock.Unlock() if !s.listenerIsRunning { - return + return nil } - if err := 
s.wgInterface.GetFilter().RemovePacketHook(s.udpFilterHookID); err != nil { - log.Errorf("unable to remove DNS packet hook: %s", err) + filter := s.wgInterface.GetFilter() + if filter != nil { + filter.SetUDPPacketHook(s.runtimeIP, uint16(s.runtimePort), nil) + if s.tcpHookSet { + filter.SetTCPPacketHook(s.runtimeIP, uint16(s.runtimePort), nil) + } + } + + if s.tcpDNS != nil { + s.tcpDNS.Stop() } s.listenerIsRunning = false + + return nil } func (s *ServiceViaMemory) RegisterMux(pattern string, handler dns.Handler) { @@ -88,10 +98,18 @@ func (s *ServiceViaMemory) RuntimeIP() netip.Addr { return s.runtimeIP } -func (s *ServiceViaMemory) filterDNSTraffic() (string, error) { +func (s *ServiceViaMemory) filterDNSTraffic() error { filter := s.wgInterface.GetFilter() if filter == nil { - return "", fmt.Errorf("can't set DNS filter, filter not initialized") + return errors.New("DNS filter not initialized") + } + + // Create TCP DNS server lazily here since the device may not exist at construction time. + if s.tcpDNS == nil { + if dev := s.wgInterface.GetDevice(); dev != nil { + // MTU only affects TCP segment sizing; DNS messages are small so this has no practical impact. 
+ s.tcpDNS = newTCPDNSServer(s.dnsMux, dev.Device, s.runtimeIP, uint16(s.runtimePort), iface.DefaultMTU) + } } firstLayerDecoder := layers.LayerTypeIPv4 @@ -100,12 +118,16 @@ func (s *ServiceViaMemory) filterDNSTraffic() (string, error) { } hook := func(packetData []byte) bool { - // Decode the packet packet := gopacket.NewPacket(packetData, firstLayerDecoder, gopacket.Default) - // Get the UDP layer udpLayer := packet.Layer(layers.LayerTypeUDP) - udp := udpLayer.(*layers.UDP) + if udpLayer == nil { + return true + } + udp, ok := udpLayer.(*layers.UDP) + if !ok { + return true + } msg := new(dns.Msg) if err := msg.Unpack(udp.Payload); err != nil { @@ -113,13 +135,30 @@ func (s *ServiceViaMemory) filterDNSTraffic() (string, error) { return true } - writer := responseWriter{ - packet: packet, - device: s.wgInterface.GetDevice().Device, + dev := s.wgInterface.GetDevice() + if dev == nil { + return true } - go s.dnsMux.ServeDNS(&writer, msg) + + writer := &responseWriter{ + remote: remoteAddrFromPacket(packet), + packet: packet, + device: dev.Device, + } + go s.dnsMux.ServeDNS(writer, msg) return true } - return filter.AddUDPPacketHook(false, s.runtimeIP, uint16(s.runtimePort), hook), nil + filter.SetUDPPacketHook(s.runtimeIP, uint16(s.runtimePort), hook) + + if s.tcpDNS != nil { + tcpHook := func(packetData []byte) bool { + s.tcpDNS.InjectPacket(packetData) + return true + } + filter.SetTCPPacketHook(s.runtimeIP, uint16(s.runtimePort), tcpHook) + s.tcpHookSet = true + } + + return nil } diff --git a/client/internal/dns/tcpstack.go b/client/internal/dns/tcpstack.go new file mode 100644 index 000000000..88e72e767 --- /dev/null +++ b/client/internal/dns/tcpstack.go @@ -0,0 +1,444 @@ +package dns + +import ( + "errors" + "fmt" + "io" + "net" + "net/netip" + "sync" + "sync/atomic" + "time" + + "github.com/miekg/dns" + log "github.com/sirupsen/logrus" + "golang.zx2c4.com/wireguard/tun" + "gvisor.dev/gvisor/pkg/buffer" + "gvisor.dev/gvisor/pkg/tcpip" + 
"gvisor.dev/gvisor/pkg/tcpip/adapters/gonet" + "gvisor.dev/gvisor/pkg/tcpip/header" + "gvisor.dev/gvisor/pkg/tcpip/network/ipv4" + "gvisor.dev/gvisor/pkg/tcpip/stack" + "gvisor.dev/gvisor/pkg/tcpip/transport/tcp" + "gvisor.dev/gvisor/pkg/waiter" +) + +const ( + dnsTCPReceiveWindow = 8192 + dnsTCPMaxInFlight = 16 + dnsTCPIdleTimeout = 30 * time.Second + dnsTCPReadTimeout = 5 * time.Second +) + +// tcpDNSServer is an on-demand TCP DNS server backed by a minimal gvisor stack. +// It is started lazily when a truncated DNS response is detected and shuts down +// after a period of inactivity to conserve resources. +type tcpDNSServer struct { + mu sync.Mutex + s *stack.Stack + ep *dnsEndpoint + mux *dns.ServeMux + tunDev tun.Device + ip netip.Addr + port uint16 + mtu uint16 + + running bool + closed bool + timerID uint64 + timer *time.Timer +} + +func newTCPDNSServer(mux *dns.ServeMux, tunDev tun.Device, ip netip.Addr, port uint16, mtu uint16) *tcpDNSServer { + return &tcpDNSServer{ + mux: mux, + tunDev: tunDev, + ip: ip, + port: port, + mtu: mtu, + } +} + +// InjectPacket ensures the stack is running and delivers a raw IP packet into +// the gvisor stack for TCP processing. Combining both operations under a single +// lock prevents a race where the idle timer could stop the stack between +// start and delivery. +func (t *tcpDNSServer) InjectPacket(payload []byte) { + t.mu.Lock() + defer t.mu.Unlock() + + if t.closed { + return + } + + if !t.running { + if err := t.startLocked(); err != nil { + log.Errorf("failed to start TCP DNS stack: %v", err) + return + } + t.running = true + log.Debugf("TCP DNS stack started on %s:%d (triggered by %s)", t.ip, t.port, srcAddrFromPacket(payload)) + } + t.resetTimerLocked() + + ep := t.ep + if ep == nil || ep.dispatcher == nil { + return + } + + pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{ + Payload: buffer.MakeWithData(payload), + }) + // DeliverNetworkPacket takes ownership of the packet buffer; do not DecRef. 
+ ep.dispatcher.DeliverNetworkPacket(ipv4.ProtocolNumber, pkt) +} + +// Stop tears down the gvisor stack and releases resources permanently. +// After Stop, InjectPacket becomes a no-op. +func (t *tcpDNSServer) Stop() { + t.mu.Lock() + defer t.mu.Unlock() + + t.stopLocked() + t.closed = true +} + +func (t *tcpDNSServer) startLocked() error { + // TODO: add ipv6.NewProtocol when IPv6 overlay support lands. + s := stack.New(stack.Options{ + NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol}, + TransportProtocols: []stack.TransportProtocolFactory{tcp.NewProtocol}, + HandleLocal: false, + }) + + nicID := tcpip.NICID(1) + ep := &dnsEndpoint{ + tunDev: t.tunDev, + } + ep.mtu.Store(uint32(t.mtu)) + + if err := s.CreateNIC(nicID, ep); err != nil { + s.Close() + s.Wait() + return fmt.Errorf("create NIC: %v", err) + } + + protoAddr := tcpip.ProtocolAddress{ + Protocol: ipv4.ProtocolNumber, + AddressWithPrefix: tcpip.AddressWithPrefix{ + Address: tcpip.AddrFromSlice(t.ip.AsSlice()), + PrefixLen: 32, + }, + } + if err := s.AddProtocolAddress(nicID, protoAddr, stack.AddressProperties{}); err != nil { + s.Close() + s.Wait() + return fmt.Errorf("add protocol address: %s", err) + } + + if err := s.SetPromiscuousMode(nicID, true); err != nil { + s.Close() + s.Wait() + return fmt.Errorf("set promiscuous mode: %s", err) + } + if err := s.SetSpoofing(nicID, true); err != nil { + s.Close() + s.Wait() + return fmt.Errorf("set spoofing: %s", err) + } + + defaultSubnet, err := tcpip.NewSubnet( + tcpip.AddrFrom4([4]byte{0, 0, 0, 0}), + tcpip.MaskFromBytes([]byte{0, 0, 0, 0}), + ) + if err != nil { + s.Close() + s.Wait() + return fmt.Errorf("create default subnet: %w", err) + } + + s.SetRouteTable([]tcpip.Route{ + {Destination: defaultSubnet, NIC: nicID}, + }) + + tcpFwd := tcp.NewForwarder(s, dnsTCPReceiveWindow, dnsTCPMaxInFlight, func(r *tcp.ForwarderRequest) { + t.handleTCPDNS(r) + }) + s.SetTransportProtocolHandler(tcp.ProtocolNumber, tcpFwd.HandlePacket) + + t.s = s + 
t.ep = ep + return nil +} + +func (t *tcpDNSServer) stopLocked() { + if !t.running { + return + } + + if t.timer != nil { + t.timer.Stop() + t.timer = nil + } + + if t.s != nil { + t.s.Close() + t.s.Wait() + t.s = nil + } + t.ep = nil + t.running = false + + log.Debugf("TCP DNS stack stopped") +} + +func (t *tcpDNSServer) resetTimerLocked() { + if t.timer != nil { + t.timer.Stop() + } + t.timerID++ + id := t.timerID + t.timer = time.AfterFunc(dnsTCPIdleTimeout, func() { + t.mu.Lock() + defer t.mu.Unlock() + + // Only stop if this timer is still the active one. + // A racing InjectPacket may have replaced it. + if t.timerID != id { + return + } + t.stopLocked() + }) +} + +func (t *tcpDNSServer) handleTCPDNS(r *tcp.ForwarderRequest) { + id := r.ID() + + wq := waiter.Queue{} + ep, epErr := r.CreateEndpoint(&wq) + if epErr != nil { + log.Debugf("TCP DNS: failed to create endpoint: %v", epErr) + r.Complete(true) + return + } + r.Complete(false) + + conn := gonet.NewTCPConn(&wq, ep) + defer func() { + if err := conn.Close(); err != nil { + log.Tracef("TCP DNS: close conn: %v", err) + } + }() + + // Reset idle timer on activity + t.mu.Lock() + t.resetTimerLocked() + t.mu.Unlock() + + localAddr := &net.TCPAddr{ + IP: id.LocalAddress.AsSlice(), + Port: int(id.LocalPort), + } + remoteAddr := &net.TCPAddr{ + IP: id.RemoteAddress.AsSlice(), + Port: int(id.RemotePort), + } + + for { + if err := conn.SetReadDeadline(time.Now().Add(dnsTCPReadTimeout)); err != nil { + log.Debugf("TCP DNS: set deadline for %s: %v", remoteAddr, err) + break + } + + msg, err := readTCPDNSMessage(conn) + if err != nil { + if !errors.Is(err, io.EOF) && !errors.Is(err, io.ErrUnexpectedEOF) { + log.Debugf("TCP DNS: read from %s: %v", remoteAddr, err) + } + break + } + + writer := &tcpResponseWriter{ + conn: conn, + localAddr: localAddr, + remoteAddr: remoteAddr, + } + t.mux.ServeDNS(writer, msg) + } +} + +// dnsEndpoint implements stack.LinkEndpoint for writing packets back via the tun device. 
+type dnsEndpoint struct { + dispatcher stack.NetworkDispatcher + tunDev tun.Device + mtu atomic.Uint32 +} + +func (e *dnsEndpoint) Attach(dispatcher stack.NetworkDispatcher) { e.dispatcher = dispatcher } +func (e *dnsEndpoint) IsAttached() bool { return e.dispatcher != nil } +func (e *dnsEndpoint) MTU() uint32 { return e.mtu.Load() } +func (e *dnsEndpoint) Capabilities() stack.LinkEndpointCapabilities { return stack.CapabilityNone } +func (e *dnsEndpoint) MaxHeaderLength() uint16 { return 0 } +func (e *dnsEndpoint) LinkAddress() tcpip.LinkAddress { return "" } +func (e *dnsEndpoint) Wait() { /* no async work */ } +func (e *dnsEndpoint) ARPHardwareType() header.ARPHardwareType { return header.ARPHardwareNone } +func (e *dnsEndpoint) AddHeader(*stack.PacketBuffer) { /* IP-level endpoint, no link header */ } +func (e *dnsEndpoint) ParseHeader(*stack.PacketBuffer) bool { return true } +func (e *dnsEndpoint) Close() { /* lifecycle managed by tcpDNSServer */ } +func (e *dnsEndpoint) SetLinkAddress(tcpip.LinkAddress) { /* no link address for tun */ } +func (e *dnsEndpoint) SetMTU(mtu uint32) { e.mtu.Store(mtu) } +func (e *dnsEndpoint) SetOnCloseAction(func()) { /* not needed */ } + +const tunPacketOffset = 40 + +func (e *dnsEndpoint) WritePackets(pkts stack.PacketBufferList) (int, tcpip.Error) { + var written int + for _, pkt := range pkts.AsSlice() { + data := stack.PayloadSince(pkt.NetworkHeader()) + if data == nil { + continue + } + + raw := data.AsSlice() + buf := make([]byte, tunPacketOffset, tunPacketOffset+len(raw)) + buf = append(buf, raw...) + data.Release() + + if _, err := e.tunDev.Write([][]byte{buf}, tunPacketOffset); err != nil { + log.Tracef("TCP DNS endpoint: failed to write packet: %v", err) + continue + } + written++ + } + return written, nil +} + +// tcpResponseWriter implements dns.ResponseWriter for TCP DNS connections. 
+type tcpResponseWriter struct { + conn *gonet.TCPConn + localAddr net.Addr + remoteAddr net.Addr +} + +func (w *tcpResponseWriter) LocalAddr() net.Addr { + return w.localAddr +} + +func (w *tcpResponseWriter) RemoteAddr() net.Addr { + return w.remoteAddr +} + +func (w *tcpResponseWriter) WriteMsg(msg *dns.Msg) error { + data, err := msg.Pack() + if err != nil { + return fmt.Errorf("pack: %w", err) + } + + // DNS TCP: 2-byte length prefix + message + buf := make([]byte, 2+len(data)) + buf[0] = byte(len(data) >> 8) + buf[1] = byte(len(data)) + copy(buf[2:], data) + + if _, err = w.conn.Write(buf); err != nil { + return err + } + return nil +} + +func (w *tcpResponseWriter) Write(data []byte) (int, error) { + buf := make([]byte, 2+len(data)) + buf[0] = byte(len(data) >> 8) + buf[1] = byte(len(data)) + copy(buf[2:], data) + if _, err := w.conn.Write(buf); err != nil { + return 0, err + } + return len(data), nil +} + +func (w *tcpResponseWriter) Close() error { + return w.conn.Close() +} + +func (w *tcpResponseWriter) TsigStatus() error { return nil } +func (w *tcpResponseWriter) TsigTimersOnly(bool) { /* TSIG not supported */ } +func (w *tcpResponseWriter) Hijack() { /* not supported */ } + +// readTCPDNSMessage reads a single DNS message from a TCP connection (length-prefixed). 
+func readTCPDNSMessage(conn *gonet.TCPConn) (*dns.Msg, error) { + // DNS over TCP uses a 2-byte length prefix + lenBuf := make([]byte, 2) + if _, err := io.ReadFull(conn, lenBuf); err != nil { + return nil, fmt.Errorf("read length: %w", err) + } + + msgLen := int(lenBuf[0])<<8 | int(lenBuf[1]) + if msgLen == 0 || msgLen > 65535 { + return nil, fmt.Errorf("invalid message length: %d", msgLen) + } + + msgBuf := make([]byte, msgLen) + if _, err := io.ReadFull(conn, msgBuf); err != nil { + return nil, fmt.Errorf("read message: %w", err) + } + + msg := new(dns.Msg) + if err := msg.Unpack(msgBuf); err != nil { + return nil, fmt.Errorf("unpack: %w", err) + } + return msg, nil +} + +// srcAddrFromPacket extracts the source IP:port from a raw IP+TCP packet for logging. +// Supports both IPv4 and IPv6. +func srcAddrFromPacket(pkt []byte) netip.AddrPort { + if len(pkt) == 0 { + return netip.AddrPort{} + } + + srcIP, transportOffset := srcIPFromPacket(pkt) + if !srcIP.IsValid() || len(pkt) < transportOffset+2 { + return netip.AddrPort{} + } + + srcPort := uint16(pkt[transportOffset])<<8 | uint16(pkt[transportOffset+1]) + return netip.AddrPortFrom(srcIP.Unmap(), srcPort) +} + +func srcIPFromPacket(pkt []byte) (netip.Addr, int) { + switch header.IPVersion(pkt) { + case 4: + return srcIPv4(pkt) + case 6: + return srcIPv6(pkt) + default: + return netip.Addr{}, 0 + } +} + +func srcIPv4(pkt []byte) (netip.Addr, int) { + if len(pkt) < header.IPv4MinimumSize { + return netip.Addr{}, 0 + } + hdr := header.IPv4(pkt) + src := hdr.SourceAddress() + ip, ok := netip.AddrFromSlice(src.AsSlice()) + if !ok { + return netip.Addr{}, 0 + } + return ip, int(hdr.HeaderLength()) +} + +func srcIPv6(pkt []byte) (netip.Addr, int) { + if len(pkt) < header.IPv6MinimumSize { + return netip.Addr{}, 0 + } + hdr := header.IPv6(pkt) + src := hdr.SourceAddress() + ip, ok := netip.AddrFromSlice(src.AsSlice()) + if !ok { + return netip.Addr{}, 0 + } + return ip, header.IPv6MinimumSize +} diff --git 
a/client/internal/dns/upstream.go b/client/internal/dns/upstream.go index 18128a942..746b73ca7 100644 --- a/client/internal/dns/upstream.go +++ b/client/internal/dns/upstream.go @@ -41,10 +41,61 @@ const ( reactivatePeriod = 30 * time.Second probeTimeout = 2 * time.Second + + // ipv6HeaderSize + udpHeaderSize, used to derive the maximum DNS UDP + // payload from the tunnel MTU. + ipUDPHeaderSize = 60 + 8 ) const testRecord = "com." +const ( + protoUDP = "udp" + protoTCP = "tcp" +) + +type dnsProtocolKey struct{} + +// contextWithDNSProtocol stores the inbound DNS protocol ("udp" or "tcp") in context. +func contextWithDNSProtocol(ctx context.Context, network string) context.Context { + return context.WithValue(ctx, dnsProtocolKey{}, network) +} + +// dnsProtocolFromContext retrieves the inbound DNS protocol from context. +func dnsProtocolFromContext(ctx context.Context) string { + if ctx == nil { + return "" + } + if v, ok := ctx.Value(dnsProtocolKey{}).(string); ok { + return v + } + return "" +} + +type upstreamProtocolKey struct{} + +// upstreamProtocolResult holds the protocol used for the upstream exchange. +// Stored as a pointer in context so the exchange function can set it. +type upstreamProtocolResult struct { + protocol string +} + +// contextWithupstreamProtocolResult stores a mutable result holder in the context. +func contextWithupstreamProtocolResult(ctx context.Context) (context.Context, *upstreamProtocolResult) { + r := &upstreamProtocolResult{} + return context.WithValue(ctx, upstreamProtocolKey{}, r), r +} + +// setUpstreamProtocol sets the upstream protocol on the result holder in context, if present. 
+func setUpstreamProtocol(ctx context.Context, protocol string) { + if ctx == nil { + return + } + if r, ok := ctx.Value(upstreamProtocolKey{}).(*upstreamProtocolResult); ok && r != nil { + r.protocol = protocol + } +} + type upstreamClient interface { exchange(ctx context.Context, upstream string, r *dns.Msg) (*dns.Msg, time.Duration, error) } @@ -70,6 +121,7 @@ type upstreamResolverBase struct { deactivate func(error) reactivate func() statusRecorder *peer.Status + routeMatch func(netip.Addr) bool } type upstreamFailure struct { @@ -137,7 +189,16 @@ func (u *upstreamResolverBase) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { return } - ok, failures := u.tryUpstreamServers(w, r, logger) + // Propagate inbound protocol so upstream exchange can use TCP directly + // when the request came in over TCP. + ctx := u.ctx + if addr := w.RemoteAddr(); addr != nil { + network := addr.Network() + ctx = contextWithDNSProtocol(ctx, network) + resutil.SetMeta(w, "protocol", network) + } + + ok, failures := u.tryUpstreamServers(ctx, w, r, logger) if len(failures) > 0 { u.logUpstreamFailures(r.Question[0].Name, failures, ok, logger) } @@ -152,7 +213,7 @@ func (u *upstreamResolverBase) prepareRequest(r *dns.Msg) { } } -func (u *upstreamResolverBase) tryUpstreamServers(w dns.ResponseWriter, r *dns.Msg, logger *log.Entry) (bool, []upstreamFailure) { +func (u *upstreamResolverBase) tryUpstreamServers(ctx context.Context, w dns.ResponseWriter, r *dns.Msg, logger *log.Entry) (bool, []upstreamFailure) { timeout := u.upstreamTimeout if len(u.upstreamServers) > 1 { maxTotal := 5 * time.Second @@ -167,7 +228,7 @@ func (u *upstreamResolverBase) tryUpstreamServers(w dns.ResponseWriter, r *dns.M var failures []upstreamFailure for _, upstream := range u.upstreamServers { - if failure := u.queryUpstream(w, r, upstream, timeout, logger); failure != nil { + if failure := u.queryUpstream(ctx, w, r, upstream, timeout, logger); failure != nil { failures = append(failures, *failure) } else { return 
true, failures @@ -177,15 +238,17 @@ func (u *upstreamResolverBase) tryUpstreamServers(w dns.ResponseWriter, r *dns.M } // queryUpstream queries a single upstream server. Returns nil on success, or failure info to try next upstream. -func (u *upstreamResolverBase) queryUpstream(w dns.ResponseWriter, r *dns.Msg, upstream netip.AddrPort, timeout time.Duration, logger *log.Entry) *upstreamFailure { +func (u *upstreamResolverBase) queryUpstream(parentCtx context.Context, w dns.ResponseWriter, r *dns.Msg, upstream netip.AddrPort, timeout time.Duration, logger *log.Entry) *upstreamFailure { var rm *dns.Msg var t time.Duration var err error var startTime time.Time + var upstreamProto *upstreamProtocolResult func() { - ctx, cancel := context.WithTimeout(u.ctx, timeout) + ctx, cancel := context.WithTimeout(parentCtx, timeout) defer cancel() + ctx, upstreamProto = contextWithupstreamProtocolResult(ctx) startTime = time.Now() rm, t, err = u.upstreamClient.exchange(ctx, upstream.String(), r) }() @@ -202,7 +265,7 @@ func (u *upstreamResolverBase) queryUpstream(w dns.ResponseWriter, r *dns.Msg, u return &upstreamFailure{upstream: upstream, reason: dns.RcodeToString[rm.Rcode]} } - u.writeSuccessResponse(w, rm, upstream, r.Question[0].Name, t, logger) + u.writeSuccessResponse(w, rm, upstream, r.Question[0].Name, t, upstreamProto, logger) return nil } @@ -219,10 +282,13 @@ func (u *upstreamResolverBase) handleUpstreamError(err error, upstream netip.Add return &upstreamFailure{upstream: upstream, reason: reason} } -func (u *upstreamResolverBase) writeSuccessResponse(w dns.ResponseWriter, rm *dns.Msg, upstream netip.AddrPort, domain string, t time.Duration, logger *log.Entry) bool { +func (u *upstreamResolverBase) writeSuccessResponse(w dns.ResponseWriter, rm *dns.Msg, upstream netip.AddrPort, domain string, t time.Duration, upstreamProto *upstreamProtocolResult, logger *log.Entry) bool { u.successCount.Add(1) resutil.SetMeta(w, "upstream", upstream.String()) + if upstreamProto != 
nil && upstreamProto.protocol != "" { + resutil.SetMeta(w, "upstream_protocol", upstreamProto.protocol) + } // Clear Zero bit from external responses to prevent upstream servers from // manipulating our internal fallthrough signaling mechanism @@ -427,13 +493,42 @@ func (u *upstreamResolverBase) testNameserver(baseCtx context.Context, externalC return err } +// clientUDPMaxSize returns the maximum UDP response size the client accepts. +func clientUDPMaxSize(r *dns.Msg) int { + if opt := r.IsEdns0(); opt != nil { + return int(opt.UDPSize()) + } + return dns.MinMsgSize +} + // ExchangeWithFallback exchanges a DNS message with the upstream server. // It first tries to use UDP, and if it is truncated, it falls back to TCP. +// If the inbound request came over TCP (via context), it skips the UDP attempt. // If the passed context is nil, this will use Exchange instead of ExchangeContext. func ExchangeWithFallback(ctx context.Context, client *dns.Client, r *dns.Msg, upstream string) (*dns.Msg, time.Duration, error) { - // MTU - ip + udp headers - // Note: this could be sent out on an interface that is not ours, but higher MTU settings could break truncation handling. - client.UDPSize = uint16(currentMTU - (60 + 8)) + // If the request came in over TCP, go straight to TCP upstream. + if dnsProtocolFromContext(ctx) == protoTCP { + tcpClient := *client + tcpClient.Net = protoTCP + rm, t, err := tcpClient.ExchangeContext(ctx, r, upstream) + if err != nil { + return nil, t, fmt.Errorf("with tcp: %w", err) + } + setUpstreamProtocol(ctx, protoTCP) + return rm, t, nil + } + + clientMaxSize := clientUDPMaxSize(r) + + // Cap EDNS0 to our tunnel MTU so the upstream doesn't send a + // response larger than our read buffer. + // Note: the query could be sent out on an interface that is not ours, + // but higher MTU settings could break truncation handling. 
+ maxUDPPayload := uint16(currentMTU - ipUDPHeaderSize) + client.UDPSize = maxUDPPayload + if opt := r.IsEdns0(); opt != nil && opt.UDPSize() > maxUDPPayload { + opt.SetUDPSize(maxUDPPayload) + } var ( rm *dns.Msg @@ -452,25 +547,32 @@ func ExchangeWithFallback(ctx context.Context, client *dns.Client, r *dns.Msg, u } if rm == nil || !rm.MsgHdr.Truncated { + setUpstreamProtocol(ctx, protoUDP) return rm, t, nil } - log.Tracef("udp response for domain=%s type=%v class=%v is truncated, trying TCP.", - r.Question[0].Name, r.Question[0].Qtype, r.Question[0].Qclass) + // TODO: if the upstream's truncated UDP response already contains more + // data than the client's buffer, we could truncate locally and skip + // the TCP retry. - client.Net = "tcp" + tcpClient := *client + tcpClient.Net = protoTCP if ctx == nil { - rm, t, err = client.Exchange(r, upstream) + rm, t, err = tcpClient.Exchange(r, upstream) } else { - rm, t, err = client.ExchangeContext(ctx, r, upstream) + rm, t, err = tcpClient.ExchangeContext(ctx, r, upstream) } if err != nil { return nil, t, fmt.Errorf("with tcp: %w", err) } - // TODO: once TCP is implemented, rm.Truncate() if the request came in over UDP + setUpstreamProtocol(ctx, protoTCP) + + if rm.Len() > clientMaxSize { + rm.Truncate(clientMaxSize) + } return rm, t, nil } @@ -478,18 +580,46 @@ func ExchangeWithFallback(ctx context.Context, client *dns.Client, r *dns.Msg, u // ExchangeWithNetstack performs a DNS exchange using netstack for dialing. // This is needed when netstack is enabled to reach peer IPs through the tunnel. 
func ExchangeWithNetstack(ctx context.Context, nsNet *netstack.Net, r *dns.Msg, upstream string) (*dns.Msg, error) { - reply, err := netstackExchange(ctx, nsNet, r, upstream, "udp") + // If request came in over TCP, go straight to TCP upstream + if dnsProtocolFromContext(ctx) == protoTCP { + rm, err := netstackExchange(ctx, nsNet, r, upstream, protoTCP) + if err != nil { + return nil, err + } + setUpstreamProtocol(ctx, protoTCP) + return rm, nil + } + + clientMaxSize := clientUDPMaxSize(r) + + // Cap EDNS0 to our tunnel MTU so the upstream doesn't send a + // response larger than what we can read over UDP. + maxUDPPayload := uint16(currentMTU - ipUDPHeaderSize) + if opt := r.IsEdns0(); opt != nil && opt.UDPSize() > maxUDPPayload { + opt.SetUDPSize(maxUDPPayload) + } + + reply, err := netstackExchange(ctx, nsNet, r, upstream, protoUDP) if err != nil { return nil, err } - // If response is truncated, retry with TCP if reply != nil && reply.MsgHdr.Truncated { - log.Tracef("udp response for domain=%s type=%v class=%v is truncated, trying TCP", - r.Question[0].Name, r.Question[0].Qtype, r.Question[0].Qclass) - return netstackExchange(ctx, nsNet, r, upstream, "tcp") + rm, err := netstackExchange(ctx, nsNet, r, upstream, protoTCP) + if err != nil { + return nil, err + } + + setUpstreamProtocol(ctx, protoTCP) + if rm.Len() > clientMaxSize { + rm.Truncate(clientMaxSize) + } + + return rm, nil } + setUpstreamProtocol(ctx, protoUDP) + return reply, nil } @@ -510,7 +640,7 @@ func netstackExchange(ctx context.Context, nsNet *netstack.Net, r *dns.Msg, upst } } - dnsConn := &dns.Conn{Conn: conn} + dnsConn := &dns.Conn{Conn: conn, UDPSize: uint16(currentMTU - ipUDPHeaderSize)} if err := dnsConn.WriteMsg(r); err != nil { return nil, fmt.Errorf("write %s message: %w", network, err) diff --git a/client/internal/dns/upstream_android.go b/client/internal/dns/upstream_android.go index d7cff377b..ee1ca42fe 100644 --- a/client/internal/dns/upstream_android.go +++ 
b/client/internal/dns/upstream_android.go @@ -51,7 +51,7 @@ func (u *upstreamResolver) exchangeWithinVPN(ctx context.Context, upstream strin upstreamExchangeClient := &dns.Client{ Timeout: ClientTimeout, } - return upstreamExchangeClient.ExchangeContext(ctx, r, upstream) + return ExchangeWithFallback(ctx, upstreamExchangeClient, r, upstream) } // exchangeWithoutVPN protect the UDP socket by Android SDK to avoid to goes through the VPN @@ -76,7 +76,7 @@ func (u *upstreamResolver) exchangeWithoutVPN(ctx context.Context, upstream stri Timeout: timeout, } - return upstreamExchangeClient.ExchangeContext(ctx, r, upstream) + return ExchangeWithFallback(ctx, upstreamExchangeClient, r, upstream) } func (u *upstreamResolver) isLocalResolver(upstream string) bool { diff --git a/client/internal/dns/upstream_ios.go b/client/internal/dns/upstream_ios.go index 4d053a5a1..02c11173b 100644 --- a/client/internal/dns/upstream_ios.go +++ b/client/internal/dns/upstream_ios.go @@ -65,11 +65,13 @@ func (u *upstreamResolverIOS) exchange(ctx context.Context, upstream string, r * } else { upstreamIP = upstreamIP.Unmap() } - if u.lNet.Contains(upstreamIP) || upstreamIP.IsPrivate() { - log.Debugf("using private client to query upstream: %s", upstream) + needsPrivate := u.lNet.Contains(upstreamIP) || + (u.routeMatch != nil && u.routeMatch(upstreamIP)) + if needsPrivate { + log.Debugf("using private client to query %s via upstream %s", r.Question[0].Name, upstream) client, err = GetClientPrivate(u.lIP, u.interfaceName, timeout) if err != nil { - return nil, 0, fmt.Errorf("error while creating private client: %s", err) + return nil, 0, fmt.Errorf("create private client: %s", err) } } diff --git a/client/internal/dns/upstream_test.go b/client/internal/dns/upstream_test.go index ab164c30b..1797fdad8 100644 --- a/client/internal/dns/upstream_test.go +++ b/client/internal/dns/upstream_test.go @@ -475,3 +475,298 @@ func TestFormatFailures(t *testing.T) { }) } } + +func TestDNSProtocolContext(t 
*testing.T) { + t.Run("roundtrip udp", func(t *testing.T) { + ctx := contextWithDNSProtocol(context.Background(), protoUDP) + assert.Equal(t, protoUDP, dnsProtocolFromContext(ctx)) + }) + + t.Run("roundtrip tcp", func(t *testing.T) { + ctx := contextWithDNSProtocol(context.Background(), protoTCP) + assert.Equal(t, protoTCP, dnsProtocolFromContext(ctx)) + }) + + t.Run("missing returns empty", func(t *testing.T) { + assert.Equal(t, "", dnsProtocolFromContext(context.Background())) + }) +} + +func TestExchangeWithFallback_TCPContext(t *testing.T) { + // Start a local DNS server that responds on TCP only + tcpHandler := dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) { + m := new(dns.Msg) + m.SetReply(r) + m.Answer = append(m.Answer, &dns.A{ + Hdr: dns.RR_Header{Name: r.Question[0].Name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 60}, + A: net.ParseIP("10.0.0.1"), + }) + if err := w.WriteMsg(m); err != nil { + t.Logf("write msg: %v", err) + } + }) + + tcpServer := &dns.Server{ + Addr: "127.0.0.1:0", + Net: "tcp", + Handler: tcpHandler, + } + + tcpLn, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + tcpServer.Listener = tcpLn + + go func() { + if err := tcpServer.ActivateAndServe(); err != nil { + t.Logf("tcp server: %v", err) + } + }() + defer func() { + _ = tcpServer.Shutdown() + }() + + upstream := tcpLn.Addr().String() + + // With TCP context, should connect directly via TCP without trying UDP + ctx := contextWithDNSProtocol(context.Background(), protoTCP) + client := &dns.Client{Timeout: 2 * time.Second} + r := new(dns.Msg).SetQuestion("example.com.", dns.TypeA) + + rm, _, err := ExchangeWithFallback(ctx, client, r, upstream) + require.NoError(t, err) + require.NotNil(t, rm) + require.NotEmpty(t, rm.Answer) + assert.Contains(t, rm.Answer[0].String(), "10.0.0.1") +} + +func TestExchangeWithFallback_UDPFallbackToTCP(t *testing.T) { + // UDP handler returns a truncated response to trigger TCP retry. 
+ udpHandler := dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) { + m := new(dns.Msg) + m.SetReply(r) + m.Truncated = true + if err := w.WriteMsg(m); err != nil { + t.Logf("write msg: %v", err) + } + }) + + // TCP handler returns the full answer. + tcpHandler := dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) { + m := new(dns.Msg) + m.SetReply(r) + m.Answer = append(m.Answer, &dns.A{ + Hdr: dns.RR_Header{Name: r.Question[0].Name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 60}, + A: net.ParseIP("10.0.0.3"), + }) + if err := w.WriteMsg(m); err != nil { + t.Logf("write msg: %v", err) + } + }) + + udpPC, err := net.ListenPacket("udp", "127.0.0.1:0") + require.NoError(t, err) + addr := udpPC.LocalAddr().String() + + udpServer := &dns.Server{ + PacketConn: udpPC, + Net: "udp", + Handler: udpHandler, + } + + tcpLn, err := net.Listen("tcp", addr) + require.NoError(t, err) + + tcpServer := &dns.Server{ + Listener: tcpLn, + Net: "tcp", + Handler: tcpHandler, + } + + go func() { + if err := udpServer.ActivateAndServe(); err != nil { + t.Logf("udp server: %v", err) + } + }() + go func() { + if err := tcpServer.ActivateAndServe(); err != nil { + t.Logf("tcp server: %v", err) + } + }() + defer func() { + _ = udpServer.Shutdown() + _ = tcpServer.Shutdown() + }() + + ctx := context.Background() + client := &dns.Client{Timeout: 2 * time.Second} + r := new(dns.Msg).SetQuestion("example.com.", dns.TypeA) + + rm, _, err := ExchangeWithFallback(ctx, client, r, addr) + require.NoError(t, err, "should fall back to TCP after truncated UDP response") + require.NotNil(t, rm) + require.NotEmpty(t, rm.Answer, "TCP response should contain the full answer") + assert.Contains(t, rm.Answer[0].String(), "10.0.0.3") + assert.False(t, rm.Truncated, "TCP response should not be truncated") +} + +func TestExchangeWithFallback_TCPContextSkipsUDP(t *testing.T) { + // Start only a TCP server (no UDP). With TCP context it should succeed. 
+ tcpHandler := dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) { + m := new(dns.Msg) + m.SetReply(r) + m.Answer = append(m.Answer, &dns.A{ + Hdr: dns.RR_Header{Name: r.Question[0].Name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 60}, + A: net.ParseIP("10.0.0.2"), + }) + if err := w.WriteMsg(m); err != nil { + t.Logf("write msg: %v", err) + } + }) + + tcpLn, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + + tcpServer := &dns.Server{ + Listener: tcpLn, + Net: "tcp", + Handler: tcpHandler, + } + + go func() { + if err := tcpServer.ActivateAndServe(); err != nil { + t.Logf("tcp server: %v", err) + } + }() + defer func() { + _ = tcpServer.Shutdown() + }() + + upstream := tcpLn.Addr().String() + + // TCP context: should skip UDP entirely and go directly to TCP + ctx := contextWithDNSProtocol(context.Background(), protoTCP) + client := &dns.Client{Timeout: 2 * time.Second} + r := new(dns.Msg).SetQuestion("example.com.", dns.TypeA) + + rm, _, err := ExchangeWithFallback(ctx, client, r, upstream) + require.NoError(t, err) + require.NotNil(t, rm) + require.NotEmpty(t, rm.Answer) + assert.Contains(t, rm.Answer[0].String(), "10.0.0.2") + + // Without TCP context, trying to reach a TCP-only server via UDP should fail + ctx2 := context.Background() + client2 := &dns.Client{Timeout: 500 * time.Millisecond} + _, _, err = ExchangeWithFallback(ctx2, client2, r, upstream) + assert.Error(t, err, "should fail when no UDP server and no TCP context") +} + +func TestExchangeWithFallback_EDNS0Capped(t *testing.T) { + // Verify that a client EDNS0 larger than our MTU-derived limit gets + // capped in the outgoing request so the upstream doesn't send a + // response larger than our read buffer. 
+ var receivedUDPSize uint16 + udpHandler := dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) { + if opt := r.IsEdns0(); opt != nil { + receivedUDPSize = opt.UDPSize() + } + m := new(dns.Msg) + m.SetReply(r) + m.Answer = append(m.Answer, &dns.A{ + Hdr: dns.RR_Header{Name: r.Question[0].Name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 60}, + A: net.ParseIP("10.0.0.1"), + }) + if err := w.WriteMsg(m); err != nil { + t.Logf("write msg: %v", err) + } + }) + + udpPC, err := net.ListenPacket("udp", "127.0.0.1:0") + require.NoError(t, err) + addr := udpPC.LocalAddr().String() + + udpServer := &dns.Server{PacketConn: udpPC, Net: "udp", Handler: udpHandler} + go func() { _ = udpServer.ActivateAndServe() }() + t.Cleanup(func() { _ = udpServer.Shutdown() }) + + ctx := context.Background() + client := &dns.Client{Timeout: 2 * time.Second} + r := new(dns.Msg).SetQuestion("example.com.", dns.TypeA) + r.SetEdns0(4096, false) + + rm, _, err := ExchangeWithFallback(ctx, client, r, addr) + require.NoError(t, err) + require.NotNil(t, rm) + + expectedMax := uint16(currentMTU - ipUDPHeaderSize) + assert.Equal(t, expectedMax, receivedUDPSize, + "upstream should see capped EDNS0, not the client's 4096") +} + +func TestExchangeWithFallback_TCPTruncatesToClientSize(t *testing.T) { + // When the client advertises a large EDNS0 (4096) and the upstream + // truncates, the TCP response should NOT be truncated since the full + // answer fits within the client's original buffer. 
+ udpHandler := dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) { + m := new(dns.Msg) + m.SetReply(r) + m.Truncated = true + if err := w.WriteMsg(m); err != nil { + t.Logf("write msg: %v", err) + } + }) + + tcpHandler := dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) { + m := new(dns.Msg) + m.SetReply(r) + // Add enough records to exceed MTU but fit within 4096 + for i := range 20 { + m.Answer = append(m.Answer, &dns.TXT{ + Hdr: dns.RR_Header{Name: r.Question[0].Name, Rrtype: dns.TypeTXT, Class: dns.ClassINET, Ttl: 60}, + Txt: []string{fmt.Sprintf("record-%d-padding-data-to-make-it-longer", i)}, + }) + } + if err := w.WriteMsg(m); err != nil { + t.Logf("write msg: %v", err) + } + }) + + udpPC, err := net.ListenPacket("udp", "127.0.0.1:0") + require.NoError(t, err) + addr := udpPC.LocalAddr().String() + + udpServer := &dns.Server{PacketConn: udpPC, Net: "udp", Handler: udpHandler} + tcpLn, err := net.Listen("tcp", addr) + require.NoError(t, err) + tcpServer := &dns.Server{Listener: tcpLn, Net: "tcp", Handler: tcpHandler} + + go func() { _ = udpServer.ActivateAndServe() }() + go func() { _ = tcpServer.ActivateAndServe() }() + t.Cleanup(func() { + _ = udpServer.Shutdown() + _ = tcpServer.Shutdown() + }) + + ctx := context.Background() + client := &dns.Client{Timeout: 2 * time.Second} + + // Client with large buffer: should get all records without truncation + r := new(dns.Msg).SetQuestion("example.com.", dns.TypeTXT) + r.SetEdns0(4096, false) + + rm, _, err := ExchangeWithFallback(ctx, client, r, addr) + require.NoError(t, err) + require.NotNil(t, rm) + assert.Len(t, rm.Answer, 20, "large EDNS0 client should get all records") + assert.False(t, rm.Truncated, "response should not be truncated for large buffer client") + + // Client with small buffer: should get truncated response + r2 := new(dns.Msg).SetQuestion("example.com.", dns.TypeTXT) + r2.SetEdns0(512, false) + + rm2, _, err := ExchangeWithFallback(ctx, &dns.Client{Timeout: 2 * time.Second}, r2, 
addr) + require.NoError(t, err) + require.NotNil(t, rm2) + assert.Less(t, len(rm2.Answer), 20, "small EDNS0 client should get fewer records") + assert.True(t, rm2.Truncated, "response should be truncated for small buffer client") +} diff --git a/client/internal/dnsfwd/forwarder.go b/client/internal/dnsfwd/forwarder.go index 5c7cb31fc..2e8ef84ab 100644 --- a/client/internal/dnsfwd/forwarder.go +++ b/client/internal/dnsfwd/forwarder.go @@ -237,8 +237,8 @@ func (f *DNSForwarder) writeResponse(logger *log.Entry, w dns.ResponseWriter, re return } - logger.Tracef("response: domain=%s rcode=%s answers=%s took=%s", - qname, dns.RcodeToString[resp.Rcode], resutil.FormatAnswers(resp.Answer), time.Since(startTime)) + logger.Tracef("response: domain=%s rcode=%s answers=%s size=%dB took=%s", + qname, dns.RcodeToString[resp.Rcode], resutil.FormatAnswers(resp.Answer), resp.Len(), time.Since(startTime)) } // udpResponseWriter wraps a dns.ResponseWriter to handle UDP-specific truncation. @@ -263,20 +263,28 @@ func (u *udpResponseWriter) WriteMsg(resp *dns.Msg) error { func (f *DNSForwarder) handleDNSQueryUDP(w dns.ResponseWriter, query *dns.Msg) { startTime := time.Now() - logger := log.WithFields(log.Fields{ + fields := log.Fields{ "request_id": resutil.GenerateRequestID(), "dns_id": fmt.Sprintf("%04x", query.Id), - }) + } + if addr := w.RemoteAddr(); addr != nil { + fields["client"] = addr.String() + } + logger := log.WithFields(fields) f.handleDNSQuery(logger, &udpResponseWriter{ResponseWriter: w, query: query}, query, startTime) } func (f *DNSForwarder) handleDNSQueryTCP(w dns.ResponseWriter, query *dns.Msg) { startTime := time.Now() - logger := log.WithFields(log.Fields{ + fields := log.Fields{ "request_id": resutil.GenerateRequestID(), "dns_id": fmt.Sprintf("%04x", query.Id), - }) + } + if addr := w.RemoteAddr(); addr != nil { + fields["client"] = addr.String() + } + logger := log.WithFields(fields) f.handleDNSQuery(logger, w, query, startTime) } diff --git 
a/client/internal/engine.go b/client/internal/engine.go index ea1d3bec9..be2d8bbf3 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -46,6 +46,7 @@ import ( "github.com/netbirdio/netbird/client/internal/peer/guard" icemaker "github.com/netbirdio/netbird/client/internal/peer/ice" "github.com/netbirdio/netbird/client/internal/peerstore" + "github.com/netbirdio/netbird/client/internal/portforward" "github.com/netbirdio/netbird/client/internal/profilemanager" "github.com/netbirdio/netbird/client/internal/relay" "github.com/netbirdio/netbird/client/internal/rosenpass" @@ -210,9 +211,10 @@ type Engine struct { // checks are the client-applied posture checks that need to be evaluated on the client checks []*mgmProto.Checks - relayManager *relayClient.Manager - stateManager *statemanager.Manager - srWatcher *guard.SRWatcher + relayManager *relayClient.Manager + stateManager *statemanager.Manager + portForwardManager *portforward.Manager + srWatcher *guard.SRWatcher // Sync response persistence (protected by syncRespMux) syncRespMux sync.RWMutex @@ -259,26 +261,27 @@ func NewEngine( mobileDep MobileDependency, ) *Engine { engine := &Engine{ - clientCtx: clientCtx, - clientCancel: clientCancel, - signal: services.SignalClient, - signaler: peer.NewSignaler(services.SignalClient, config.WgPrivateKey), - mgmClient: services.MgmClient, - relayManager: services.RelayManager, - peerStore: peerstore.NewConnStore(), - syncMsgMux: &sync.Mutex{}, - config: config, - mobileDep: mobileDep, - STUNs: []*stun.URI{}, - TURNs: []*stun.URI{}, - networkSerial: 0, - statusRecorder: services.StatusRecorder, - stateManager: services.StateManager, - checks: services.Checks, - probeStunTurn: relay.NewStunTurnProbe(relay.DefaultCacheTTL), - jobExecutor: jobexec.NewExecutor(), - clientMetrics: services.ClientMetrics, - updateManager: services.UpdateManager, + clientCtx: clientCtx, + clientCancel: clientCancel, + signal: services.SignalClient, + signaler: 
peer.NewSignaler(services.SignalClient, config.WgPrivateKey), + mgmClient: services.MgmClient, + relayManager: services.RelayManager, + peerStore: peerstore.NewConnStore(), + syncMsgMux: &sync.Mutex{}, + config: config, + mobileDep: mobileDep, + STUNs: []*stun.URI{}, + TURNs: []*stun.URI{}, + networkSerial: 0, + statusRecorder: services.StatusRecorder, + stateManager: services.StateManager, + portForwardManager: portforward.NewManager(), + checks: services.Checks, + probeStunTurn: relay.NewStunTurnProbe(relay.DefaultCacheTTL), + jobExecutor: jobexec.NewExecutor(), + clientMetrics: services.ClientMetrics, + updateManager: services.UpdateManager, } log.Infof("I am: %s", config.WgPrivateKey.PublicKey().String()) @@ -499,6 +502,17 @@ func (e *Engine) Start(netbirdConfig *mgmProto.NetbirdConfig, mgmtURL *url.URL) e.routeManager.SetRouteChangeListener(e.mobileDep.NetworkChangeListener) + e.dnsServer.SetRouteChecker(func(ip netip.Addr) bool { + for _, routes := range e.routeManager.GetSelectedClientRoutes() { + for _, r := range routes { + if r.Network.Contains(ip) { + return true + } + } + } + return false + }) + if err = e.wgInterfaceCreate(); err != nil { log.Errorf("failed creating tunnel interface %s: [%s]", e.config.WgIfaceName, err.Error()) e.close() @@ -510,6 +524,11 @@ func (e *Engine) Start(netbirdConfig *mgmProto.NetbirdConfig, mgmtURL *url.URL) return err } + // Inject firewall into DNS server now that it's available. + // The DNS server is created before the firewall because the route manager + // depends on the DNS server, and the firewall depends on the wg interface. 
+ e.dnsServer.SetFirewall(e.firewall) + e.udpMux, err = e.wgInterface.Up() if err != nil { log.Errorf("failed to pull up wgInterface [%s]: %s", e.wgInterface.Name(), err.Error()) @@ -521,6 +540,13 @@ func (e *Engine) Start(netbirdConfig *mgmProto.NetbirdConfig, mgmtURL *url.URL) // conntrack entries from being created before the rules are in place e.setupWGProxyNoTrack() + // Start after interface is up since port may have been resolved from 0 or changed if occupied + e.shutdownWg.Add(1) + go func() { + defer e.shutdownWg.Done() + e.portForwardManager.Start(e.ctx, uint16(e.config.WgPort)) + }() + // Set the WireGuard interface for rosenpass after interface is up if e.rpManager != nil { e.rpManager.SetInterface(e.wgInterface) @@ -1524,12 +1550,13 @@ func (e *Engine) createPeerConn(pubKey string, allowedIPs []netip.Prefix, agentV } serviceDependencies := peer.ServiceDependencies{ - StatusRecorder: e.statusRecorder, - Signaler: e.signaler, - IFaceDiscover: e.mobileDep.IFaceDiscover, - RelayManager: e.relayManager, - SrWatcher: e.srWatcher, - MetricsRecorder: e.clientMetrics, + StatusRecorder: e.statusRecorder, + Signaler: e.signaler, + IFaceDiscover: e.mobileDep.IFaceDiscover, + RelayManager: e.relayManager, + SrWatcher: e.srWatcher, + PortForwardManager: e.portForwardManager, + MetricsRecorder: e.clientMetrics, } peerConn, err := peer.NewConn(config, serviceDependencies) if err != nil { @@ -1686,6 +1713,12 @@ func (e *Engine) close() { if e.rpManager != nil { _ = e.rpManager.Close() } + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + if err := e.portForwardManager.GracefullyStop(ctx); err != nil { + log.Warnf("failed to gracefully stop port forwarding manager: %s", err) + } } func (e *Engine) readInitialSettings() ([]*route.Route, *nbdns.Config, bool, error) { @@ -1789,7 +1822,7 @@ func (e *Engine) newDnsServer(dnsConfig *nbdns.Config) (dns.Server, error) { return dnsServer, nil case "ios": - dnsServer := 
dns.NewDefaultServerIos(e.ctx, e.wgInterface, e.mobileDep.DnsManager, e.statusRecorder, e.config.DisableDNS) + dnsServer := dns.NewDefaultServerIos(e.ctx, e.wgInterface, e.mobileDep.DnsManager, e.mobileDep.HostDNSAddresses, e.statusRecorder, e.config.DisableDNS) return dnsServer, nil default: @@ -1826,6 +1859,11 @@ func (e *Engine) GetExposeManager() *expose.Manager { return e.exposeManager } +// IsBlockInbound returns whether inbound connections are blocked. +func (e *Engine) IsBlockInbound() bool { + return e.config.BlockInbound +} + // GetClientMetrics returns the client metrics func (e *Engine) GetClientMetrics() *metrics.ClientMetrics { return e.clientMetrics diff --git a/client/internal/engine_test.go b/client/internal/engine_test.go index 77fe9049b..1f6fe384a 100644 --- a/client/internal/engine_test.go +++ b/client/internal/engine_test.go @@ -828,7 +828,7 @@ func TestEngine_UpdateNetworkMapWithRoutes(t *testing.T) { WgPrivateKey: key, WgPort: 33100, MTU: iface.DefaultMTU, - }, EngineServices{ + }, EngineServices{ SignalClient: &signal.MockClient{}, MgmClient: &mgmt.MockClient{}, RelayManager: relayMgr, @@ -1035,7 +1035,7 @@ func TestEngine_UpdateNetworkMapWithDNSUpdate(t *testing.T) { WgPrivateKey: key, WgPort: 33100, MTU: iface.DefaultMTU, - }, EngineServices{ + }, EngineServices{ SignalClient: &signal.MockClient{}, MgmClient: &mgmt.MockClient{}, RelayManager: relayMgr, @@ -1538,13 +1538,8 @@ func createEngine(ctx context.Context, cancel context.CancelFunc, setupKey strin return nil, err } - publicKey, err := mgmtClient.GetServerPublicKey() - if err != nil { - return nil, err - } - info := system.GetInfo(ctx) - resp, err := mgmtClient.Register(*publicKey, setupKey, "", info, nil, nil) + resp, err := mgmtClient.Register(setupKey, "", info, nil, nil) if err != nil { return nil, err } @@ -1566,7 +1561,7 @@ func createEngine(ctx context.Context, cancel context.CancelFunc, setupKey strin } relayMgr := relayClient.NewManager(ctx, nil, key.PublicKey().String(), 
iface.DefaultMTU) -e, err := NewEngine(ctx, cancel, conf, EngineServices{ + e, err := NewEngine(ctx, cancel, conf, EngineServices{ SignalClient: signalClient, MgmClient: mgmtClient, RelayManager: relayMgr, diff --git a/client/internal/expose/manager.go b/client/internal/expose/manager.go index c59a1a7bd..076f92043 100644 --- a/client/internal/expose/manager.go +++ b/client/internal/expose/manager.go @@ -4,11 +4,14 @@ import ( "context" "time" - mgm "github.com/netbirdio/netbird/shared/management/client" log "github.com/sirupsen/logrus" + + mgm "github.com/netbirdio/netbird/shared/management/client" ) -const renewTimeout = 10 * time.Second +const ( + renewTimeout = 10 * time.Second +) // Response holds the response from exposing a service. type Response struct { @@ -18,11 +21,13 @@ type Response struct { PortAutoAssigned bool } +// Request holds the parameters for exposing a local service via the management server. +// It is part of the embed API surface and exposed via a type alias. type Request struct { NamePrefix string Domain string Port uint16 - Protocol int + Protocol ProtocolType Pin string Password string UserGroups []string @@ -59,6 +64,8 @@ func (m *Manager) Expose(ctx context.Context, req Request) (*Response, error) { return fromClientExposeResponse(resp), nil } +// KeepAlive periodically renews the expose session for the given domain until the context is canceled or an error occurs. +// It is part of the embed API surface and exposed via a type alias. 
func (m *Manager) KeepAlive(ctx context.Context, domain string) error { ticker := time.NewTicker(30 * time.Second) defer ticker.Stop() diff --git a/client/internal/expose/manager_test.go b/client/internal/expose/manager_test.go index 87d43cdb0..7d76c9838 100644 --- a/client/internal/expose/manager_test.go +++ b/client/internal/expose/manager_test.go @@ -86,7 +86,7 @@ func TestNewRequest(t *testing.T) { exposeReq := NewRequest(req) assert.Equal(t, uint16(8080), exposeReq.Port, "port should match") - assert.Equal(t, int(daemonProto.ExposeProtocol_EXPOSE_HTTPS), exposeReq.Protocol, "protocol should match") + assert.Equal(t, ProtocolType(daemonProto.ExposeProtocol_EXPOSE_HTTPS), exposeReq.Protocol, "protocol should match") assert.Equal(t, "123456", exposeReq.Pin, "pin should match") assert.Equal(t, "secret", exposeReq.Password, "password should match") assert.Equal(t, []string{"group1", "group2"}, exposeReq.UserGroups, "user groups should match") diff --git a/client/internal/expose/protocol.go b/client/internal/expose/protocol.go new file mode 100644 index 000000000..d5026d51e --- /dev/null +++ b/client/internal/expose/protocol.go @@ -0,0 +1,40 @@ +package expose + +import ( + "fmt" + "strings" +) + +// ProtocolType represents the protocol used for exposing a service. +type ProtocolType int + +const ( + // ProtocolHTTP exposes the service as HTTP. + ProtocolHTTP ProtocolType = 0 + // ProtocolHTTPS exposes the service as HTTPS. + ProtocolHTTPS ProtocolType = 1 + // ProtocolTCP exposes the service as TCP. + ProtocolTCP ProtocolType = 2 + // ProtocolUDP exposes the service as UDP. + ProtocolUDP ProtocolType = 3 + // ProtocolTLS exposes the service as TLS. + ProtocolTLS ProtocolType = 4 +) + +// ParseProtocolType parses a protocol string into a ProtocolType. 
+func ParseProtocolType(s string) (ProtocolType, error) { + switch strings.ToLower(s) { + case "http": + return ProtocolHTTP, nil + case "https": + return ProtocolHTTPS, nil + case "tcp": + return ProtocolTCP, nil + case "udp": + return ProtocolUDP, nil + case "tls": + return ProtocolTLS, nil + default: + return 0, fmt.Errorf("unsupported protocol %q: must be http, https, tcp, udp, or tls", s) + } +} diff --git a/client/internal/expose/request.go b/client/internal/expose/request.go index bff4f2ce7..ec75bb276 100644 --- a/client/internal/expose/request.go +++ b/client/internal/expose/request.go @@ -9,7 +9,7 @@ import ( func NewRequest(req *daemonProto.ExposeServiceRequest) *Request { return &Request{ Port: uint16(req.Port), - Protocol: int(req.Protocol), + Protocol: ProtocolType(req.Protocol), Pin: req.Pin, Password: req.Password, UserGroups: req.UserGroups, @@ -24,7 +24,7 @@ func toClientExposeRequest(req Request) mgm.ExposeRequest { NamePrefix: req.NamePrefix, Domain: req.Domain, Port: req.Port, - Protocol: req.Protocol, + Protocol: int(req.Protocol), Pin: req.Pin, Password: req.Password, UserGroups: req.UserGroups, diff --git a/client/internal/peer/conn.go b/client/internal/peer/conn.go index bea0725f2..8d1585b3f 100644 --- a/client/internal/peer/conn.go +++ b/client/internal/peer/conn.go @@ -22,6 +22,7 @@ import ( icemaker "github.com/netbirdio/netbird/client/internal/peer/ice" "github.com/netbirdio/netbird/client/internal/peer/id" "github.com/netbirdio/netbird/client/internal/peer/worker" + "github.com/netbirdio/netbird/client/internal/portforward" "github.com/netbirdio/netbird/client/internal/stdnet" "github.com/netbirdio/netbird/route" relayClient "github.com/netbirdio/netbird/shared/relay/client" @@ -45,6 +46,7 @@ type ServiceDependencies struct { RelayManager *relayClient.Manager SrWatcher *guard.SRWatcher PeerConnDispatcher *dispatcher.ConnectionDispatcher + PortForwardManager *portforward.Manager MetricsRecorder MetricsRecorder } @@ -87,16 +89,17 @@ type 
ConnConfig struct { } type Conn struct { - Log *log.Entry - mu sync.Mutex - ctx context.Context - ctxCancel context.CancelFunc - config ConnConfig - statusRecorder *Status - signaler *Signaler - iFaceDiscover stdnet.ExternalIFaceDiscover - relayManager *relayClient.Manager - srWatcher *guard.SRWatcher + Log *log.Entry + mu sync.Mutex + ctx context.Context + ctxCancel context.CancelFunc + config ConnConfig + statusRecorder *Status + signaler *Signaler + iFaceDiscover stdnet.ExternalIFaceDiscover + relayManager *relayClient.Manager + srWatcher *guard.SRWatcher + portForwardManager *portforward.Manager onConnected func(remoteWireGuardKey string, remoteRosenpassPubKey []byte, wireGuardIP string, remoteRosenpassAddr string) onDisconnected func(remotePeer string) @@ -145,19 +148,20 @@ func NewConn(config ConnConfig, services ServiceDependencies) (*Conn, error) { dumpState := newStateDump(config.Key, connLog, services.StatusRecorder) var conn = &Conn{ - Log: connLog, - config: config, - statusRecorder: services.StatusRecorder, - signaler: services.Signaler, - iFaceDiscover: services.IFaceDiscover, - relayManager: services.RelayManager, - srWatcher: services.SrWatcher, - statusRelay: worker.NewAtomicStatus(), - statusICE: worker.NewAtomicStatus(), - dumpState: dumpState, - endpointUpdater: NewEndpointUpdater(connLog, config.WgConfig, isController(config)), - wgWatcher: NewWGWatcher(connLog, config.WgConfig.WgInterface, config.Key, dumpState), - metricsRecorder: services.MetricsRecorder, + Log: connLog, + config: config, + statusRecorder: services.StatusRecorder, + signaler: services.Signaler, + iFaceDiscover: services.IFaceDiscover, + relayManager: services.RelayManager, + srWatcher: services.SrWatcher, + portForwardManager: services.PortForwardManager, + statusRelay: worker.NewAtomicStatus(), + statusICE: worker.NewAtomicStatus(), + dumpState: dumpState, + endpointUpdater: NewEndpointUpdater(connLog, config.WgConfig, isController(config)), + wgWatcher: 
NewWGWatcher(connLog, config.WgConfig.WgInterface, config.Key, dumpState), + metricsRecorder: services.MetricsRecorder, } return conn, nil diff --git a/client/internal/peer/worker_ice.go b/client/internal/peer/worker_ice.go index edd70fb20..29bf5aaaa 100644 --- a/client/internal/peer/worker_ice.go +++ b/client/internal/peer/worker_ice.go @@ -16,6 +16,7 @@ import ( "github.com/netbirdio/netbird/client/iface/udpmux" "github.com/netbirdio/netbird/client/internal/peer/conntype" icemaker "github.com/netbirdio/netbird/client/internal/peer/ice" + "github.com/netbirdio/netbird/client/internal/portforward" "github.com/netbirdio/netbird/client/internal/stdnet" "github.com/netbirdio/netbird/route" ) @@ -61,6 +62,9 @@ type WorkerICE struct { // we record the last known state of the ICE agent to avoid duplicate on disconnected events lastKnownState ice.ConnectionState + + // portForwardAttempted tracks if we've already tried port forwarding this session + portForwardAttempted bool } func NewWorkerICE(ctx context.Context, log *log.Entry, config ConnConfig, conn *Conn, signaler *Signaler, ifaceDiscover stdnet.ExternalIFaceDiscover, statusRecorder *Status, hasRelayOnLocally bool) (*WorkerICE, error) { @@ -214,6 +218,8 @@ func (w *WorkerICE) Close() { } func (w *WorkerICE) reCreateAgent(dialerCancel context.CancelFunc, candidates []ice.CandidateType) (*icemaker.ThreadSafeAgent, error) { + w.portForwardAttempted = false + agent, err := icemaker.NewAgent(w.ctx, w.iFaceDiscover, w.config.ICEConfig, candidates, w.localUfrag, w.localPwd) if err != nil { return nil, fmt.Errorf("create agent: %w", err) @@ -370,6 +376,93 @@ func (w *WorkerICE) onICECandidate(candidate ice.Candidate) { w.log.Errorf("failed signaling candidate to the remote peer %s %s", w.config.Key, err) } }() + + if candidate.Type() == ice.CandidateTypeServerReflexive { + w.injectPortForwardedCandidate(candidate) + } +} + +// injectPortForwardedCandidate signals an additional candidate using the pre-created port mapping. 
+func (w *WorkerICE) injectPortForwardedCandidate(srflxCandidate ice.Candidate) { + pfManager := w.conn.portForwardManager + if pfManager == nil { + return + } + + mapping := pfManager.GetMapping() + if mapping == nil { + return + } + + w.muxAgent.Lock() + if w.portForwardAttempted { + w.muxAgent.Unlock() + return + } + w.portForwardAttempted = true + w.muxAgent.Unlock() + + forwardedCandidate, err := w.createForwardedCandidate(srflxCandidate, mapping) + if err != nil { + w.log.Warnf("create forwarded candidate: %v", err) + return + } + + w.log.Debugf("injecting port-forwarded candidate: %s (mapping: %d -> %d via %s, priority: %d)", + forwardedCandidate.String(), mapping.InternalPort, mapping.ExternalPort, mapping.NATType, forwardedCandidate.Priority()) + + go func() { + if err := w.signaler.SignalICECandidate(forwardedCandidate, w.config.Key); err != nil { + w.log.Errorf("signal port-forwarded candidate: %v", err) + } + }() +} + +// createForwardedCandidate creates a new server reflexive candidate with the forwarded port. +// It uses the NAT gateway's external IP with the forwarded port. +func (w *WorkerICE) createForwardedCandidate(srflxCandidate ice.Candidate, mapping *portforward.Mapping) (ice.Candidate, error) { + var externalIP string + if mapping.ExternalIP != nil && !mapping.ExternalIP.IsUnspecified() { + externalIP = mapping.ExternalIP.String() + } else { + // Fallback to STUN-discovered address if NAT didn't provide external IP + externalIP = srflxCandidate.Address() + } + + // Per RFC 8445, the related address for srflx is the base (host candidate address). + // If the original srflx has unspecified related address, use its own address as base. + relAddr := srflxCandidate.RelatedAddress().Address + if relAddr == "" || relAddr == "0.0.0.0" || relAddr == "::" { + relAddr = srflxCandidate.Address() + } + + // Arbitrary +1000 boost on top of RFC 8445 priority to favor port-forwarded candidates + // over regular srflx during ICE connectivity checks. 
+ priority := srflxCandidate.Priority() + 1000 + + candidate, err := ice.NewCandidateServerReflexive(&ice.CandidateServerReflexiveConfig{ + Network: srflxCandidate.NetworkType().String(), + Address: externalIP, + Port: int(mapping.ExternalPort), + Component: srflxCandidate.Component(), + Priority: priority, + RelAddr: relAddr, + RelPort: int(mapping.InternalPort), + }) + if err != nil { + return nil, fmt.Errorf("create candidate: %w", err) + } + + for _, e := range srflxCandidate.Extensions() { + if e.Key == ice.ExtensionKeyCandidateID { + e.Value = srflxCandidate.ID() + } + if err := candidate.AddExtension(e); err != nil { + return nil, fmt.Errorf("add extension: %w", err) + } + } + + return candidate, nil } func (w *WorkerICE) onICESelectedCandidatePair(agent *icemaker.ThreadSafeAgent, c1, c2 ice.Candidate) { @@ -411,10 +504,10 @@ func (w *WorkerICE) logSuccessfulPaths(agent *icemaker.ThreadSafeAgent) { if !lok || !rok { continue } - w.log.Debugf("successful ICE path %s: [%s %s %s] <-> [%s %s %s] rtt=%.3fms", + w.log.Debugf("successful ICE path %s: [%s %s %s:%d] <-> [%s %s %s:%d] rtt=%.3fms", sessionID, - local.NetworkType(), local.Type(), local.Address(), - remote.NetworkType(), remote.Type(), remote.Address(), + local.NetworkType(), local.Type(), local.Address(), local.Port(), + remote.NetworkType(), remote.Type(), remote.Address(), remote.Port(), stat.CurrentRoundTripTime*1000) } } diff --git a/client/internal/portforward/env.go b/client/internal/portforward/env.go new file mode 100644 index 000000000..444a6b478 --- /dev/null +++ b/client/internal/portforward/env.go @@ -0,0 +1,26 @@ +package portforward + +import ( + "os" + "strconv" + + log "github.com/sirupsen/logrus" +) + +const ( + envDisableNATMapper = "NB_DISABLE_NAT_MAPPER" +) + +func isDisabledByEnv() bool { + val := os.Getenv(envDisableNATMapper) + if val == "" { + return false + } + + disabled, err := strconv.ParseBool(val) + if err != nil { + log.Warnf("failed to parse %s: %v", envDisableNATMapper, 
err) + return false + } + return disabled +} diff --git a/client/internal/portforward/manager.go b/client/internal/portforward/manager.go new file mode 100644 index 000000000..bf7533af9 --- /dev/null +++ b/client/internal/portforward/manager.go @@ -0,0 +1,280 @@ +//go:build !js + +package portforward + +import ( + "context" + "fmt" + "net" + "regexp" + "sync" + "time" + + "github.com/libp2p/go-nat" + log "github.com/sirupsen/logrus" +) + +const ( + defaultMappingTTL = 2 * time.Hour + discoveryTimeout = 10 * time.Second + mappingDescription = "NetBird" +) + +// upnpErrPermanentLeaseOnly matches UPnP error 725 in SOAP fault XML, +// allowing for whitespace/newlines between tags from different router firmware. +var upnpErrPermanentLeaseOnly = regexp.MustCompile(`\s*725\s*`) + +// Mapping represents an active NAT port mapping. +type Mapping struct { + Protocol string + InternalPort uint16 + ExternalPort uint16 + ExternalIP net.IP + NATType string + // TTL is the lease duration. Zero means a permanent lease that never expires. + TTL time.Duration +} + +// TODO: persist mapping state for crash recovery cleanup of permanent leases. +// Currently not done because State.Cleanup requires NAT gateway re-discovery, +// which blocks startup for ~10s when no gateway is present (affects all clients). + +type Manager struct { + cancel context.CancelFunc + + mapping *Mapping + mappingLock sync.Mutex + + wgPort uint16 + + done chan struct{} + stopCtx chan context.Context + + // protect exported functions + mu sync.Mutex +} + +// NewManager creates a new port forwarding manager. 
+func NewManager() *Manager { + return &Manager{ + stopCtx: make(chan context.Context, 1), + } +} + +func (m *Manager) Start(ctx context.Context, wgPort uint16) { + m.mu.Lock() + if m.cancel != nil { + m.mu.Unlock() + return + } + + if isDisabledByEnv() { + log.Infof("NAT port mapper disabled via %s", envDisableNATMapper) + m.mu.Unlock() + return + } + + if wgPort == 0 { + log.Warnf("invalid WireGuard port 0; NAT mapping disabled") + m.mu.Unlock() + return + } + m.wgPort = wgPort + + m.done = make(chan struct{}) + defer close(m.done) + + ctx, m.cancel = context.WithCancel(ctx) + m.mu.Unlock() + + gateway, mapping, err := m.setup(ctx) + if err != nil { + log.Infof("port forwarding setup: %v", err) + return + } + + m.mappingLock.Lock() + m.mapping = mapping + m.mappingLock.Unlock() + + m.renewLoop(ctx, gateway, mapping.TTL) + + select { + case cleanupCtx := <-m.stopCtx: + // block the Start while cleaned up gracefully + m.cleanup(cleanupCtx, gateway) + default: + // return Start immediately and cleanup in background + cleanupCtx, cleanupCancel := context.WithTimeout(context.Background(), 10*time.Second) + go func() { + defer cleanupCancel() + m.cleanup(cleanupCtx, gateway) + }() + } +} + +// GetMapping returns the current mapping if ready, nil otherwise +func (m *Manager) GetMapping() *Mapping { + m.mappingLock.Lock() + defer m.mappingLock.Unlock() + + if m.mapping == nil { + return nil + } + + mapping := *m.mapping + return &mapping +} + +// GracefullyStop cancels the manager and attempts to delete the port mapping. +// After GracefullyStop returns, the manager cannot be restarted. +func (m *Manager) GracefullyStop(ctx context.Context) error { + m.mu.Lock() + defer m.mu.Unlock() + + if m.cancel == nil { + return nil + } + + // Send cleanup context before cancelling, so Start picks it up after renewLoop exits. 
+ m.startTearDown(ctx) + + m.cancel() + m.cancel = nil + + select { + case <-ctx.Done(): + return ctx.Err() + case <-m.done: + return nil + } +} + +func (m *Manager) setup(ctx context.Context) (nat.NAT, *Mapping, error) { + discoverCtx, discoverCancel := context.WithTimeout(ctx, discoveryTimeout) + defer discoverCancel() + + gateway, err := nat.DiscoverGateway(discoverCtx) + if err != nil { + return nil, nil, fmt.Errorf("discover gateway: %w", err) + } + + log.Infof("discovered NAT gateway: %s", gateway.Type()) + + mapping, err := m.createMapping(ctx, gateway) + if err != nil { + return nil, nil, fmt.Errorf("create port mapping: %w", err) + } + return gateway, mapping, nil +} + +func (m *Manager) createMapping(ctx context.Context, gateway nat.NAT) (*Mapping, error) { + ctx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + + ttl := defaultMappingTTL + externalPort, err := gateway.AddPortMapping(ctx, "udp", int(m.wgPort), mappingDescription, ttl) + if err != nil { + if !isPermanentLeaseRequired(err) { + return nil, err + } + log.Infof("gateway only supports permanent leases, retrying with indefinite duration") + ttl = 0 + externalPort, err = gateway.AddPortMapping(ctx, "udp", int(m.wgPort), mappingDescription, ttl) + if err != nil { + return nil, err + } + } + + externalIP, err := gateway.GetExternalAddress() + if err != nil { + log.Debugf("failed to get external address: %v", err) + // todo return with err? + } + + mapping := &Mapping{ + Protocol: "udp", + InternalPort: m.wgPort, + ExternalPort: uint16(externalPort), + ExternalIP: externalIP, + NATType: gateway.Type(), + TTL: ttl, + } + + log.Infof("created port mapping: %d -> %d via %s (external IP: %s)", + m.wgPort, externalPort, gateway.Type(), externalIP) + return mapping, nil +} + +func (m *Manager) renewLoop(ctx context.Context, gateway nat.NAT, ttl time.Duration) { + if ttl == 0 { + // Permanent mappings don't expire, just wait for cancellation. 
+ <-ctx.Done() + return + } + + ticker := time.NewTicker(ttl / 2) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if err := m.renewMapping(ctx, gateway); err != nil { + log.Warnf("failed to renew port mapping: %v", err) + continue + } + } + } +} + +func (m *Manager) renewMapping(ctx context.Context, gateway nat.NAT) error { + ctx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + + externalPort, err := gateway.AddPortMapping(ctx, m.mapping.Protocol, int(m.mapping.InternalPort), mappingDescription, m.mapping.TTL) + if err != nil { + return fmt.Errorf("add port mapping: %w", err) + } + + if uint16(externalPort) != m.mapping.ExternalPort { + log.Warnf("external port changed on renewal: %d -> %d (candidate may be stale)", m.mapping.ExternalPort, externalPort) + m.mappingLock.Lock() + m.mapping.ExternalPort = uint16(externalPort) + m.mappingLock.Unlock() + } + + log.Debugf("renewed port mapping: %d -> %d", m.mapping.InternalPort, m.mapping.ExternalPort) + return nil +} + +func (m *Manager) cleanup(ctx context.Context, gateway nat.NAT) { + m.mappingLock.Lock() + mapping := m.mapping + m.mapping = nil + m.mappingLock.Unlock() + + if mapping == nil { + return + } + + if err := gateway.DeletePortMapping(ctx, mapping.Protocol, int(mapping.InternalPort)); err != nil { + log.Warnf("delete port mapping on stop: %v", err) + return + } + + log.Infof("deleted port mapping for port %d", mapping.InternalPort) +} + +func (m *Manager) startTearDown(ctx context.Context) { + select { + case m.stopCtx <- ctx: + default: + } +} + +// isPermanentLeaseRequired checks if a UPnP error indicates the gateway only supports permanent leases (error 725). 
+func isPermanentLeaseRequired(err error) bool { + return err != nil && upnpErrPermanentLeaseOnly.MatchString(err.Error()) +} diff --git a/client/internal/portforward/manager_js.go b/client/internal/portforward/manager_js.go new file mode 100644 index 000000000..36c55063b --- /dev/null +++ b/client/internal/portforward/manager_js.go @@ -0,0 +1,39 @@ +package portforward + +import ( + "context" + "net" + "time" +) + +// Mapping represents an active NAT port mapping. +type Mapping struct { + Protocol string + InternalPort uint16 + ExternalPort uint16 + ExternalIP net.IP + NATType string + // TTL is the lease duration. Zero means a permanent lease that never expires. + TTL time.Duration +} + +// Manager is a stub for js/wasm builds where NAT-PMP/UPnP is not supported. +type Manager struct{} + +// NewManager returns a stub manager for js/wasm builds. +func NewManager() *Manager { + return &Manager{} +} + +// Start is a no-op on js/wasm: NAT-PMP/UPnP is not available in browser environments. +func (m *Manager) Start(context.Context, uint16) { + // no NAT traversal in wasm +} + +// GracefullyStop is a no-op on js/wasm. +func (m *Manager) GracefullyStop(context.Context) error { return nil } + +// GetMapping always returns nil on js/wasm. 
+func (m *Manager) GetMapping() *Mapping { + return nil +} diff --git a/client/internal/portforward/manager_test.go b/client/internal/portforward/manager_test.go new file mode 100644 index 000000000..1f66f9ccd --- /dev/null +++ b/client/internal/portforward/manager_test.go @@ -0,0 +1,201 @@ +//go:build !js + +package portforward + +import ( + "context" + "fmt" + "net" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type mockNAT struct { + natType string + deviceAddr net.IP + externalAddr net.IP + internalAddr net.IP + mappings map[int]int + addMappingErr error + deleteMappingErr error + onlyPermanentLeases bool + lastTimeout time.Duration +} + +func newMockNAT() *mockNAT { + return &mockNAT{ + natType: "Mock-NAT", + deviceAddr: net.ParseIP("192.168.1.1"), + externalAddr: net.ParseIP("203.0.113.50"), + internalAddr: net.ParseIP("192.168.1.100"), + mappings: make(map[int]int), + } +} + +func (m *mockNAT) Type() string { + return m.natType +} + +func (m *mockNAT) GetDeviceAddress() (net.IP, error) { + return m.deviceAddr, nil +} + +func (m *mockNAT) GetExternalAddress() (net.IP, error) { + return m.externalAddr, nil +} + +func (m *mockNAT) GetInternalAddress() (net.IP, error) { + return m.internalAddr, nil +} + +func (m *mockNAT) AddPortMapping(ctx context.Context, protocol string, internalPort int, description string, timeout time.Duration) (int, error) { + if m.addMappingErr != nil { + return 0, m.addMappingErr + } + if m.onlyPermanentLeases && timeout != 0 { + return 0, fmt.Errorf("SOAP fault. 
Code: | Explanation: | Detail: 725OnlyPermanentLeasesSupported") + } + externalPort := internalPort + m.mappings[internalPort] = externalPort + m.lastTimeout = timeout + return externalPort, nil +} + +func (m *mockNAT) DeletePortMapping(ctx context.Context, protocol string, internalPort int) error { + if m.deleteMappingErr != nil { + return m.deleteMappingErr + } + delete(m.mappings, internalPort) + return nil +} + +func TestManager_CreateMapping(t *testing.T) { + m := NewManager() + m.wgPort = 51820 + + gateway := newMockNAT() + mapping, err := m.createMapping(context.Background(), gateway) + require.NoError(t, err) + require.NotNil(t, mapping) + + assert.Equal(t, "udp", mapping.Protocol) + assert.Equal(t, uint16(51820), mapping.InternalPort) + assert.Equal(t, uint16(51820), mapping.ExternalPort) + assert.Equal(t, "Mock-NAT", mapping.NATType) + assert.Equal(t, net.ParseIP("203.0.113.50").To4(), mapping.ExternalIP.To4()) + assert.Equal(t, defaultMappingTTL, mapping.TTL) +} + +func TestManager_GetMapping_ReturnsNilWhenNotReady(t *testing.T) { + m := NewManager() + assert.Nil(t, m.GetMapping()) +} + +func TestManager_GetMapping_ReturnsCopy(t *testing.T) { + m := NewManager() + m.mapping = &Mapping{ + Protocol: "udp", + InternalPort: 51820, + ExternalPort: 51820, + } + + mapping := m.GetMapping() + require.NotNil(t, mapping) + assert.Equal(t, uint16(51820), mapping.InternalPort) + + // Mutating the returned copy should not affect the manager's mapping. + mapping.ExternalPort = 9999 + assert.Equal(t, uint16(51820), m.GetMapping().ExternalPort) +} + +func TestManager_Cleanup_DeletesMapping(t *testing.T) { + m := NewManager() + m.mapping = &Mapping{ + Protocol: "udp", + InternalPort: 51820, + ExternalPort: 51820, + } + + gateway := newMockNAT() + // Seed the mock so we can verify deletion. 
+ gateway.mappings[51820] = 51820 + + m.cleanup(context.Background(), gateway) + + _, exists := gateway.mappings[51820] + assert.False(t, exists, "mapping should be deleted from gateway") + assert.Nil(t, m.GetMapping(), "in-memory mapping should be cleared") +} + +func TestManager_Cleanup_NilMapping(t *testing.T) { + m := NewManager() + gateway := newMockNAT() + + // Should not panic or call gateway. + m.cleanup(context.Background(), gateway) +} + + +func TestManager_CreateMapping_PermanentLeaseFallback(t *testing.T) { + m := NewManager() + m.wgPort = 51820 + + gateway := newMockNAT() + gateway.onlyPermanentLeases = true + + mapping, err := m.createMapping(context.Background(), gateway) + require.NoError(t, err) + require.NotNil(t, mapping) + + assert.Equal(t, uint16(51820), mapping.InternalPort) + assert.Equal(t, time.Duration(0), mapping.TTL, "should return zero TTL for permanent lease") + assert.Equal(t, time.Duration(0), gateway.lastTimeout, "should have retried with zero duration") +} + +func TestIsPermanentLeaseRequired(t *testing.T) { + tests := []struct { + name string + err error + expected bool + }{ + { + name: "nil error", + err: nil, + expected: false, + }, + { + name: "UPnP error 725", + err: fmt.Errorf("SOAP fault. 
Code: | Detail: 725OnlyPermanentLeasesSupported"), + expected: true, + }, + { + name: "wrapped error with 725", + err: fmt.Errorf("add port mapping: %w", fmt.Errorf("Detail: 725")), + expected: true, + }, + { + name: "error 725 with newlines in XML", + err: fmt.Errorf("\n 725\n"), + expected: true, + }, + { + name: "bare 725 without XML tag", + err: fmt.Errorf("error code 725"), + expected: false, + }, + { + name: "unrelated error", + err: fmt.Errorf("connection refused"), + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.expected, isPermanentLeaseRequired(tt.err)) + }) + } +} diff --git a/client/internal/profilemanager/config.go b/client/internal/profilemanager/config.go index b27f1932f..20c615d57 100644 --- a/client/internal/profilemanager/config.go +++ b/client/internal/profilemanager/config.go @@ -39,6 +39,18 @@ const ( DefaultAdminURL = "https://app.netbird.io:443" ) +// mgmProber is the subset of management client needed for URL migration probes. +type mgmProber interface { + HealthCheck() error + Close() error +} + +// newMgmProber creates a management client for probing URL reachability. +// Overridden in tests to avoid real network calls. 
+var newMgmProber = func(ctx context.Context, addr string, key wgtypes.Key, tlsEnabled bool) (mgmProber, error) { + return mgm.NewClient(ctx, addr, key, tlsEnabled) +} + var DefaultInterfaceBlacklist = []string{ iface.WgInterfaceDefault, "wt", "utun", "tun0", "zt", "ZeroTier", "wg", "ts", "Tailscale", "tailscale", "docker", "veth", "br-", "lo", @@ -753,21 +765,19 @@ func UpdateOldManagementURL(ctx context.Context, config *Config, configPath stri return config, err } - client, err := mgm.NewClient(ctx, newURL.Host, key, mgmTlsEnabled) + client, err := newMgmProber(ctx, newURL.Host, key, mgmTlsEnabled) if err != nil { log.Infof("couldn't switch to the new Management %s", newURL.String()) return config, err } defer func() { - err = client.Close() - if err != nil { + if err := client.Close(); err != nil { log.Warnf("failed to close the Management service client %v", err) } }() // gRPC check - _, err = client.GetServerPublicKey() - if err != nil { + if err = client.HealthCheck(); err != nil { log.Infof("couldn't switch to the new Management %s", newURL.String()) return nil, err } diff --git a/client/internal/profilemanager/config_test.go b/client/internal/profilemanager/config_test.go index ab13cf389..5216f2423 100644 --- a/client/internal/profilemanager/config_test.go +++ b/client/internal/profilemanager/config_test.go @@ -10,12 +10,21 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "golang.zx2c4.com/wireguard/wgctrl/wgtypes" "github.com/netbirdio/netbird/client/iface" "github.com/netbirdio/netbird/client/internal/routemanager/dynamic" "github.com/netbirdio/netbird/util" ) +type mockMgmProber struct{} + +func (m *mockMgmProber) HealthCheck() error { + return nil +} + +func (m *mockMgmProber) Close() error { return nil } + func TestGetConfig(t *testing.T) { // case 1: new default config has to be generated config, err := UpdateOrCreateConfig(ConfigInput{ @@ -234,6 +243,12 @@ func TestWireguardPortDefaultVsExplicit(t *testing.T) { 
} func TestUpdateOldManagementURL(t *testing.T) { + origProber := newMgmProber + newMgmProber = func(_ context.Context, _ string, _ wgtypes.Key, _ bool) (mgmProber, error) { + return &mockMgmProber{}, nil + } + t.Cleanup(func() { newMgmProber = origProber }) + tests := []struct { name string previousManagementURL string @@ -273,18 +288,17 @@ func TestUpdateOldManagementURL(t *testing.T) { ConfigPath: configPath, }) require.NoError(t, err, "failed to create testing config") - previousStats, err := os.Stat(configPath) - require.NoError(t, err, "failed to create testing config stats") + previousContent, err := os.ReadFile(configPath) + require.NoError(t, err, "failed to read initial config") resultConfig, err := UpdateOldManagementURL(context.TODO(), config, configPath) require.NoError(t, err, "got error when updating old management url") require.Equal(t, tt.expectedManagementURL, resultConfig.ManagementURL.String()) - newStats, err := os.Stat(configPath) - require.NoError(t, err, "failed to create testing config stats") - switch tt.fileShouldNotChange { - case true: - require.Equal(t, previousStats.ModTime(), newStats.ModTime(), "file should not change") - case false: - require.NotEqual(t, previousStats.ModTime(), newStats.ModTime(), "file should have changed") + newContent, err := os.ReadFile(configPath) + require.NoError(t, err, "failed to read updated config") + if tt.fileShouldNotChange { + require.Equal(t, string(previousContent), string(newContent), "file should not change") + } else { + require.NotEqual(t, string(previousContent), string(newContent), "file should have changed") } }) } diff --git a/client/internal/routemanager/manager.go b/client/internal/routemanager/manager.go index 9afe2049d..3923e153b 100644 --- a/client/internal/routemanager/manager.go +++ b/client/internal/routemanager/manager.go @@ -52,6 +52,7 @@ type Manager interface { TriggerSelection(route.HAMap) GetRouteSelector() *routeselector.RouteSelector GetClientRoutes() route.HAMap + 
GetSelectedClientRoutes() route.HAMap GetClientRoutesWithNetID() map[route.NetID][]*route.Route SetRouteChangeListener(listener listener.NetworkChangeListener) InitialRouteRange() []string @@ -167,6 +168,7 @@ func (m *DefaultManager) setupAndroidRoutes(config ManagerConfig) { NetworkType: route.IPv4Network, } cr = append(cr, fakeIPRoute) + m.notifier.SetFakeIPRoute(fakeIPRoute) } m.notifier.SetInitialClientRoutes(cr, routesForComparison) @@ -465,6 +467,16 @@ func (m *DefaultManager) GetClientRoutes() route.HAMap { return maps.Clone(m.clientRoutes) } +// GetSelectedClientRoutes returns only the currently selected/active client routes, +// filtering out deselected exit nodes. Use this instead of GetClientRoutes when checking +// if traffic should be routed through the tunnel. +func (m *DefaultManager) GetSelectedClientRoutes() route.HAMap { + m.mux.Lock() + defer m.mux.Unlock() + + return m.routeSelector.FilterSelectedExitNodes(maps.Clone(m.clientRoutes)) +} + // GetClientRoutesWithNetID returns the current routes from the route map, but the keys consist of the network ID only func (m *DefaultManager) GetClientRoutesWithNetID() map[route.NetID][]*route.Route { m.mux.Lock() diff --git a/client/internal/routemanager/mock.go b/client/internal/routemanager/mock.go index 6b06144b2..66b5e30dd 100644 --- a/client/internal/routemanager/mock.go +++ b/client/internal/routemanager/mock.go @@ -18,6 +18,7 @@ type MockManager struct { TriggerSelectionFunc func(haMap route.HAMap) GetRouteSelectorFunc func() *routeselector.RouteSelector GetClientRoutesFunc func() route.HAMap + GetSelectedClientRoutesFunc func() route.HAMap GetClientRoutesWithNetIDFunc func() map[route.NetID][]*route.Route StopFunc func(manager *statemanager.Manager) } @@ -61,7 +62,7 @@ func (m *MockManager) GetRouteSelector() *routeselector.RouteSelector { return nil } -// GetClientRoutes mock implementation of GetClientRoutes from Manager interface +// GetClientRoutes mock implementation of GetClientRoutes from the 
Manager interface func (m *MockManager) GetClientRoutes() route.HAMap { if m.GetClientRoutesFunc != nil { return m.GetClientRoutesFunc() @@ -69,6 +70,14 @@ func (m *MockManager) GetClientRoutes() route.HAMap { return nil } +// GetSelectedClientRoutes mock implementation of GetSelectedClientRoutes from the Manager interface +func (m *MockManager) GetSelectedClientRoutes() route.HAMap { + if m.GetSelectedClientRoutesFunc != nil { + return m.GetSelectedClientRoutesFunc() + } + return nil +} + // GetClientRoutesWithNetID mock implementation of GetClientRoutesWithNetID from Manager interface func (m *MockManager) GetClientRoutesWithNetID() map[route.NetID][]*route.Route { if m.GetClientRoutesWithNetIDFunc != nil { diff --git a/client/internal/routemanager/notifier/notifier_android.go b/client/internal/routemanager/notifier/notifier_android.go index dec0af87c..55e0b7421 100644 --- a/client/internal/routemanager/notifier/notifier_android.go +++ b/client/internal/routemanager/notifier/notifier_android.go @@ -16,6 +16,7 @@ import ( type Notifier struct { initialRoutes []*route.Route currentRoutes []*route.Route + fakeIPRoute *route.Route listener listener.NetworkChangeListener listenerMux sync.Mutex @@ -31,26 +32,15 @@ func (n *Notifier) SetListener(listener listener.NetworkChangeListener) { n.listener = listener } +// SetInitialClientRoutes stores the initial route sets for TUN configuration. 
func (n *Notifier) SetInitialClientRoutes(initialRoutes []*route.Route, routesForComparison []*route.Route) { - // initialRoutes contains fake IP block for interface configuration - filteredInitial := make([]*route.Route, 0) - for _, r := range initialRoutes { - if r.IsDynamic() { - continue - } - filteredInitial = append(filteredInitial, r) - } - n.initialRoutes = filteredInitial + n.initialRoutes = filterStatic(initialRoutes) + n.currentRoutes = filterStatic(routesForComparison) +} - // routesForComparison excludes fake IP block for comparison with new routes - filteredComparison := make([]*route.Route, 0) - for _, r := range routesForComparison { - if r.IsDynamic() { - continue - } - filteredComparison = append(filteredComparison, r) - } - n.currentRoutes = filteredComparison +// SetFakeIPRoute stores the fake IP route to be included in every TUN rebuild. +func (n *Notifier) SetFakeIPRoute(r *route.Route) { + n.fakeIPRoute = r } func (n *Notifier) OnNewRoutes(idMap route.HAMap) { @@ -83,13 +73,28 @@ func (n *Notifier) notify() { return } - routeStrings := n.routesToStrings(n.currentRoutes) + allRoutes := slices.Clone(n.currentRoutes) + if n.fakeIPRoute != nil { + allRoutes = append(allRoutes, n.fakeIPRoute) + } + + routeStrings := n.routesToStrings(allRoutes) sort.Strings(routeStrings) go func(l listener.NetworkChangeListener) { - l.OnNetworkChanged(strings.Join(n.addIPv6RangeIfNeeded(routeStrings, n.currentRoutes), ",")) + l.OnNetworkChanged(strings.Join(n.addIPv6RangeIfNeeded(routeStrings, allRoutes), ",")) }(n.listener) } +func filterStatic(routes []*route.Route) []*route.Route { + out := make([]*route.Route, 0, len(routes)) + for _, r := range routes { + if !r.IsDynamic() { + out = append(out, r) + } + } + return out +} + func (n *Notifier) routesToStrings(routes []*route.Route) []string { nets := make([]string, 0, len(routes)) for _, r := range routes { diff --git a/client/internal/routemanager/notifier/notifier_ios.go 
b/client/internal/routemanager/notifier/notifier_ios.go index bb125cfa4..68c85067a 100644 --- a/client/internal/routemanager/notifier/notifier_ios.go +++ b/client/internal/routemanager/notifier/notifier_ios.go @@ -34,6 +34,10 @@ func (n *Notifier) SetInitialClientRoutes([]*route.Route, []*route.Route) { // iOS doesn't care about initial routes } +func (n *Notifier) SetFakeIPRoute(*route.Route) { + // Not used on iOS +} + func (n *Notifier) OnNewRoutes(route.HAMap) { // Not used on iOS } @@ -53,7 +57,6 @@ func (n *Notifier) OnNewPrefixes(prefixes []netip.Prefix) { n.currentPrefixes = newNets n.notify() } - func (n *Notifier) notify() { n.listenerMux.Lock() defer n.listenerMux.Unlock() diff --git a/client/internal/routemanager/notifier/notifier_other.go b/client/internal/routemanager/notifier/notifier_other.go index 0521e3dc2..97c815cf0 100644 --- a/client/internal/routemanager/notifier/notifier_other.go +++ b/client/internal/routemanager/notifier/notifier_other.go @@ -23,6 +23,10 @@ func (n *Notifier) SetInitialClientRoutes([]*route.Route, []*route.Route) { // Not used on non-mobile platforms } +func (n *Notifier) SetFakeIPRoute(*route.Route) { + // Not used on non-mobile platforms +} + func (n *Notifier) OnNewRoutes(idMap route.HAMap) { // Not used on non-mobile platforms } diff --git a/client/ios/NetBirdSDK/client.go b/client/ios/NetBirdSDK/client.go index 3e2da7f4e..043673904 100644 --- a/client/ios/NetBirdSDK/client.go +++ b/client/ios/NetBirdSDK/client.go @@ -161,7 +161,11 @@ func (c *Client) Run(fd int32, interfaceName string, envList *EnvList) error { cfg.WgIface = interfaceName c.connectClient = internal.NewConnectClient(ctx, cfg, c.recorder) - return c.connectClient.RunOniOS(fd, c.networkChangeListener, c.dnsManager, c.stateFile) + hostDNS := []netip.AddrPort{ + netip.MustParseAddrPort("9.9.9.9:53"), + netip.MustParseAddrPort("149.112.112.112:53"), + } + return c.connectClient.RunOniOS(fd, c.networkChangeListener, c.dnsManager, hostDNS, c.stateFile) } // 
Stop the internal client and free the resources diff --git a/client/server/server.go b/client/server/server.go index 7c1e70692..e12b6df5b 100644 --- a/client/server/server.go +++ b/client/server/server.go @@ -1359,6 +1359,10 @@ func (s *Server) ExposeService(req *proto.ExposeServiceRequest, srv proto.Daemon return gstatus.Errorf(codes.FailedPrecondition, "engine not initialized") } + if engine.IsBlockInbound() { + return gstatus.Errorf(codes.FailedPrecondition, "expose requires inbound connections but 'block inbound' is enabled, disable it first") + } + mgr := engine.GetExposeManager() if mgr == nil { return gstatus.Errorf(codes.Internal, "expose manager not available") diff --git a/client/server/state_generic.go b/client/server/state_generic.go index 980ba0cda..86475ca42 100644 --- a/client/server/state_generic.go +++ b/client/server/state_generic.go @@ -9,6 +9,7 @@ import ( "github.com/netbirdio/netbird/client/ssh/config" ) +// registerStates registers all states that need crash recovery cleanup. func registerStates(mgr *statemanager.Manager) { mgr.RegisterState(&dns.ShutdownState{}) mgr.RegisterState(&systemops.ShutdownState{}) diff --git a/client/server/state_linux.go b/client/server/state_linux.go index 019477d8e..b193d4dfa 100644 --- a/client/server/state_linux.go +++ b/client/server/state_linux.go @@ -11,6 +11,7 @@ import ( "github.com/netbirdio/netbird/client/ssh/config" ) +// registerStates registers all states that need crash recovery cleanup. 
func registerStates(mgr *statemanager.Manager) { mgr.RegisterState(&dns.ShutdownState{}) mgr.RegisterState(&systemops.ShutdownState{}) diff --git a/client/ssh/proxy/proxy.go b/client/ssh/proxy/proxy.go index 8897b9c7e..59007f75c 100644 --- a/client/ssh/proxy/proxy.go +++ b/client/ssh/proxy/proxy.go @@ -141,7 +141,7 @@ func (p *SSHProxy) runProxySSHServer(jwtToken string) error { func (p *SSHProxy) handleSSHSession(session ssh.Session) { ptyReq, winCh, isPty := session.Pty() - hasCommand := len(session.Command()) > 0 + hasCommand := session.RawCommand() != "" sshClient, err := p.getOrCreateBackendClient(session.Context(), session.User()) if err != nil { @@ -180,7 +180,7 @@ func (p *SSHProxy) handleSSHSession(session ssh.Session) { } if hasCommand { - if err := serverSession.Run(strings.Join(session.Command(), " ")); err != nil { + if err := serverSession.Run(session.RawCommand()); err != nil { log.Debugf("run command: %v", err) p.handleProxyExitCode(session, err) } diff --git a/client/ssh/proxy/proxy_test.go b/client/ssh/proxy/proxy_test.go index dba2e88da..b33d5f8f4 100644 --- a/client/ssh/proxy/proxy_test.go +++ b/client/ssh/proxy/proxy_test.go @@ -1,6 +1,7 @@ package proxy import ( + "bytes" "context" "crypto/rand" "crypto/rsa" @@ -245,6 +246,191 @@ func TestSSHProxy_Connect(t *testing.T) { cancel() } +// TestSSHProxy_CommandQuoting verifies that the proxy preserves shell quoting +// when forwarding commands to the backend. This is critical for tools like +// Ansible that send commands such as: +// +// /bin/sh -c '( umask 77 && mkdir -p ... ) && sleep 0' +// +// The single quotes must be preserved so the backend shell receives the +// subshell expression as a single argument to -c. +func TestSSHProxy_CommandQuoting(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + sshClient, cleanup := setupProxySSHClient(t) + defer cleanup() + + // These commands simulate what the SSH protocol delivers as exec payloads. 
+ // When a user types: ssh host '/bin/sh -c "( echo hello )"' + // the local shell strips the outer single quotes, and the SSH exec request + // contains the raw string: /bin/sh -c "( echo hello )" + // + // The proxy must forward this string verbatim. Using session.Command() + // (shlex.Split + strings.Join) strips the inner double quotes, breaking + // the command on the backend. + tests := []struct { + name string + command string + expect string + }{ + { + name: "subshell_in_double_quotes", + command: `/bin/sh -c "( echo from-subshell ) && echo outer"`, + expect: "from-subshell\nouter\n", + }, + { + name: "printf_with_special_chars", + command: `/bin/sh -c "printf '%s\n' 'hello world'"`, + expect: "hello world\n", + }, + { + name: "nested_command_substitution", + command: `/bin/sh -c "echo $(echo nested)"`, + expect: "nested\n", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + session, err := sshClient.NewSession() + require.NoError(t, err) + defer func() { _ = session.Close() }() + + var stderrBuf bytes.Buffer + session.Stderr = &stderrBuf + + outputCh := make(chan []byte, 1) + errCh := make(chan error, 1) + go func() { + output, err := session.Output(tc.command) + outputCh <- output + errCh <- err + }() + + select { + case output := <-outputCh: + err := <-errCh + if stderrBuf.Len() > 0 { + t.Logf("stderr: %s", stderrBuf.String()) + } + require.NoError(t, err, "command should succeed: %s", tc.command) + assert.Equal(t, tc.expect, string(output), "output mismatch for: %s", tc.command) + case <-time.After(5 * time.Second): + t.Fatalf("command timed out: %s", tc.command) + } + }) + } +} + +// setupProxySSHClient creates a full proxy test environment and returns +// an SSH client connected through the proxy to a backend NetBird SSH server. 
+func setupProxySSHClient(t *testing.T) (*cryptossh.Client, func()) { + t.Helper() + + const ( + issuer = "https://test-issuer.example.com" + audience = "test-audience" + ) + + jwksServer, privateKey, jwksURL := setupJWKSServer(t) + + hostKey, err := nbssh.GeneratePrivateKey(nbssh.ED25519) + require.NoError(t, err) + hostPubKey, err := nbssh.GeneratePublicKey(hostKey) + require.NoError(t, err) + + serverConfig := &server.Config{ + HostKeyPEM: hostKey, + JWT: &server.JWTConfig{ + Issuer: issuer, + Audiences: []string{audience}, + KeysLocation: jwksURL, + }, + } + sshServer := server.New(serverConfig) + sshServer.SetAllowRootLogin(true) + + testUsername := testutil.GetTestUsername(t) + testJWTUser := "test-username" + testUserHash, err := sshuserhash.HashUserID(testJWTUser) + require.NoError(t, err) + + authConfig := &sshauth.Config{ + UserIDClaim: sshauth.DefaultUserIDClaim, + AuthorizedUsers: []sshuserhash.UserIDHash{testUserHash}, + MachineUsers: map[string][]uint32{ + testUsername: {0}, + }, + } + sshServer.UpdateSSHAuth(authConfig) + + sshServerAddr := server.StartTestServer(t, sshServer) + + mockDaemon := startMockDaemon(t) + + host, portStr, err := net.SplitHostPort(sshServerAddr) + require.NoError(t, err) + port, err := strconv.Atoi(portStr) + require.NoError(t, err) + + mockDaemon.setHostKey(host, hostPubKey) + + validToken := generateValidJWT(t, privateKey, issuer, audience, testJWTUser) + mockDaemon.setJWTToken(validToken) + + proxyInstance, err := New(mockDaemon.addr, host, port, io.Discard, nil) + require.NoError(t, err) + + origStdin := os.Stdin + origStdout := os.Stdout + + stdinReader, stdinWriter, err := os.Pipe() + require.NoError(t, err) + stdoutReader, stdoutWriter, err := os.Pipe() + require.NoError(t, err) + + os.Stdin = stdinReader + os.Stdout = stdoutWriter + + clientConn, proxyConn := net.Pipe() + + go func() { _, _ = io.Copy(stdinWriter, proxyConn) }() + go func() { _, _ = io.Copy(proxyConn, stdoutReader) }() + + ctx, cancel := 
context.WithTimeout(context.Background(), 10*time.Second) + + go func() { + _ = proxyInstance.Connect(ctx) + }() + + sshConfig := &cryptossh.ClientConfig{ + User: testutil.GetTestUsername(t), + Auth: []cryptossh.AuthMethod{}, + HostKeyCallback: cryptossh.InsecureIgnoreHostKey(), + Timeout: 5 * time.Second, + } + + sshClientConn, chans, reqs, err := cryptossh.NewClientConn(clientConn, "test", sshConfig) + require.NoError(t, err) + + client := cryptossh.NewClient(sshClientConn, chans, reqs) + + cleanupFn := func() { + _ = client.Close() + _ = clientConn.Close() + cancel() + os.Stdin = origStdin + os.Stdout = origStdout + _ = sshServer.Stop() + mockDaemon.stop() + jwksServer.Close() + } + + return client, cleanupFn +} + type mockDaemonServer struct { proto.UnimplementedDaemonServiceServer hostKeys map[string][]byte diff --git a/client/ssh/server/server.go b/client/ssh/server/server.go index 4431ae423..82d3b700f 100644 --- a/client/ssh/server/server.go +++ b/client/ssh/server/server.go @@ -284,19 +284,21 @@ func (s *Server) closeListener(ln net.Listener) { // Stop closes the SSH server func (s *Server) Stop() error { s.mu.Lock() - defer s.mu.Unlock() - - if s.sshServer == nil { + sshServer := s.sshServer + if sshServer == nil { + s.mu.Unlock() return nil } + s.sshServer = nil + s.listener = nil + s.mu.Unlock() - if err := s.sshServer.Close(); err != nil { + // Close outside the lock: session handlers need s.mu for unregisterSession. 
+ if err := sshServer.Close(); err != nil { log.Debugf("close SSH server: %v", err) } - s.sshServer = nil - s.listener = nil - + s.mu.Lock() maps.Clear(s.sessions) maps.Clear(s.pendingAuthJWT) maps.Clear(s.connections) @@ -307,6 +309,7 @@ func (s *Server) Stop() error { } } maps.Clear(s.remoteForwardListeners) + s.mu.Unlock() return nil } diff --git a/client/ssh/server/session_handlers.go b/client/ssh/server/session_handlers.go index f12a75961..0e531bb96 100644 --- a/client/ssh/server/session_handlers.go +++ b/client/ssh/server/session_handlers.go @@ -60,7 +60,7 @@ func (s *Server) sessionHandler(session ssh.Session) { } ptyReq, winCh, isPty := session.Pty() - hasCommand := len(session.Command()) > 0 + hasCommand := session.RawCommand() != "" if isPty && !hasCommand { // ssh - PTY interactive session (login) diff --git a/client/system/info.go b/client/system/info.go index 01176e765..f2546cfe6 100644 --- a/client/system/info.go +++ b/client/system/info.go @@ -153,6 +153,9 @@ func networkAddresses() ([]NetworkAddress, error) { var netAddresses []NetworkAddress for _, iface := range interfaces { + if iface.Flags&net.FlagUp == 0 { + continue + } if iface.HardwareAddr.String() == "" { continue } diff --git a/client/system/info_freebsd.go b/client/system/info_freebsd.go index 8e1353151..755172842 100644 --- a/client/system/info_freebsd.go +++ b/client/system/info_freebsd.go @@ -43,18 +43,24 @@ func GetInfo(ctx context.Context) *Info { systemHostname, _ := os.Hostname() + addrs, err := networkAddresses() + if err != nil { + log.Warnf("failed to discover network addresses: %s", err) + } + return &Info{ - GoOS: runtime.GOOS, - Kernel: osInfo[0], - Platform: runtime.GOARCH, - OS: osName, - OSVersion: osVersion, - Hostname: extractDeviceName(ctx, systemHostname), - CPUs: runtime.NumCPU(), - NetbirdVersion: version.NetbirdVersion(), - UIVersion: extractUserAgent(ctx), - KernelVersion: osInfo[1], - Environment: env, + GoOS: runtime.GOOS, + Kernel: osInfo[0], + Platform: 
runtime.GOARCH, + OS: osName, + OSVersion: osVersion, + Hostname: extractDeviceName(ctx, systemHostname), + CPUs: runtime.NumCPU(), + NetbirdVersion: version.NetbirdVersion(), + UIVersion: extractUserAgent(ctx), + KernelVersion: osInfo[1], + NetworkAddresses: addrs, + Environment: env, } } diff --git a/client/ui/client_ui.go b/client/ui/client_ui.go index 0574e53d0..b1e0aec41 100644 --- a/client/ui/client_ui.go +++ b/client/ui/client_ui.go @@ -324,6 +324,7 @@ type serviceClient struct { exitNodeMu sync.Mutex mExitNodeItems []menuHandler exitNodeRetryCancel context.CancelFunc + mExitNodeSeparator *systray.MenuItem mExitNodeDeselectAll *systray.MenuItem logFile string wLoginURL fyne.Window diff --git a/client/ui/debug.go b/client/ui/debug.go index 29f73a66a..4ebe4d675 100644 --- a/client/ui/debug.go +++ b/client/ui/debug.go @@ -24,9 +24,10 @@ import ( // Initial state for the debug collection type debugInitialState struct { - wasDown bool - logLevel proto.LogLevel - isLevelTrace bool + wasDown bool + needsRestoreUp bool + logLevel proto.LogLevel + isLevelTrace bool } // Debug collection parameters @@ -371,46 +372,51 @@ func (s *serviceClient) configureServiceForDebug( conn proto.DaemonServiceClient, state *debugInitialState, enablePersistence bool, -) error { +) { if state.wasDown { if _, err := conn.Up(s.ctx, &proto.UpRequest{}); err != nil { - return fmt.Errorf("bring service up: %v", err) + log.Warnf("failed to bring service up: %v", err) + } else { + log.Info("Service brought up for debug") + time.Sleep(time.Second * 10) } - log.Info("Service brought up for debug") - time.Sleep(time.Second * 10) } if !state.isLevelTrace { if _, err := conn.SetLogLevel(s.ctx, &proto.SetLogLevelRequest{Level: proto.LogLevel_TRACE}); err != nil { - return fmt.Errorf("set log level to TRACE: %v", err) + log.Warnf("failed to set log level to TRACE: %v", err) + } else { + log.Info("Log level set to TRACE for debug") } - log.Info("Log level set to TRACE for debug") } if _, err := 
conn.Down(s.ctx, &proto.DownRequest{}); err != nil { - return fmt.Errorf("bring service down: %v", err) + log.Warnf("failed to bring service down: %v", err) + } else { + state.needsRestoreUp = !state.wasDown + time.Sleep(time.Second) } - time.Sleep(time.Second) if enablePersistence { if _, err := conn.SetSyncResponsePersistence(s.ctx, &proto.SetSyncResponsePersistenceRequest{ Enabled: true, }); err != nil { - return fmt.Errorf("enable sync response persistence: %v", err) + log.Warnf("failed to enable sync response persistence: %v", err) + } else { + log.Info("Sync response persistence enabled for debug") } - log.Info("Sync response persistence enabled for debug") } if _, err := conn.Up(s.ctx, &proto.UpRequest{}); err != nil { - return fmt.Errorf("bring service back up: %v", err) + log.Warnf("failed to bring service back up: %v", err) + } else { + state.needsRestoreUp = false + time.Sleep(time.Second * 3) } - time.Sleep(time.Second * 3) if _, err := conn.StartCPUProfile(s.ctx, &proto.StartCPUProfileRequest{}); err != nil { log.Warnf("failed to start CPU profiling: %v", err) } - - return nil } func (s *serviceClient) collectDebugData( @@ -424,9 +430,7 @@ func (s *serviceClient) collectDebugData( var wg sync.WaitGroup startProgressTracker(ctx, &wg, params.duration, progress) - if err := s.configureServiceForDebug(conn, state, params.enablePersistence); err != nil { - return err - } + s.configureServiceForDebug(conn, state, params.enablePersistence) wg.Wait() progress.progressBar.Hide() @@ -482,9 +486,17 @@ func (s *serviceClient) createDebugBundleFromCollection( // Restore service to original state func (s *serviceClient) restoreServiceState(conn proto.DaemonServiceClient, state *debugInitialState) { + if state.needsRestoreUp { + if _, err := conn.Up(s.ctx, &proto.UpRequest{}); err != nil { + log.Warnf("failed to restore up state: %v", err) + } else { + log.Info("Service state restored to up") + } + } + if state.wasDown { if _, err := conn.Down(s.ctx, 
&proto.DownRequest{}); err != nil { - log.Errorf("Failed to restore down state: %v", err) + log.Warnf("failed to restore down state: %v", err) } else { log.Info("Service state restored to down") } @@ -492,7 +504,7 @@ func (s *serviceClient) restoreServiceState(conn proto.DaemonServiceClient, stat if !state.isLevelTrace { if _, err := conn.SetLogLevel(s.ctx, &proto.SetLogLevelRequest{Level: state.logLevel}); err != nil { - log.Errorf("Failed to restore log level: %v", err) + log.Warnf("failed to restore log level: %v", err) } else { log.Info("Log level restored to original setting") } diff --git a/client/ui/network.go b/client/ui/network.go index ed03f5ada..571e871bb 100644 --- a/client/ui/network.go +++ b/client/ui/network.go @@ -421,6 +421,10 @@ func (s *serviceClient) recreateExitNodeMenu(exitNodes []*proto.Network) { node.Remove() } s.mExitNodeItems = nil + if s.mExitNodeSeparator != nil { + s.mExitNodeSeparator.Remove() + s.mExitNodeSeparator = nil + } if s.mExitNodeDeselectAll != nil { s.mExitNodeDeselectAll.Remove() s.mExitNodeDeselectAll = nil @@ -453,31 +457,37 @@ func (s *serviceClient) recreateExitNodeMenu(exitNodes []*proto.Network) { } if showDeselectAll { - s.mExitNode.AddSeparator() - deselectAllItem := s.mExitNode.AddSubMenuItem("Deselect All", "Deselect All") - s.mExitNodeDeselectAll = deselectAllItem - go func() { - for { - _, ok := <-deselectAllItem.ClickedCh - if !ok { - // channel closed: exit the goroutine - return - } - exitNodes, err := s.handleExitNodeMenuDeselectAll() - if err != nil { - log.Warnf("failed to handle deselect all exit nodes: %v", err) - } else { - s.exitNodeMu.Lock() - s.recreateExitNodeMenu(exitNodes) - s.exitNodeMu.Unlock() - } - } - - }() + s.addExitNodeDeselectAll() } } +func (s *serviceClient) addExitNodeDeselectAll() { + sep := s.mExitNode.AddSubMenuItem("───────────────", "") + sep.Disable() + s.mExitNodeSeparator = sep + + deselectAllItem := s.mExitNode.AddSubMenuItem("Deselect All", "Deselect All") + 
s.mExitNodeDeselectAll = deselectAllItem + + go func() { + for { + _, ok := <-deselectAllItem.ClickedCh + if !ok { + return + } + exitNodes, err := s.handleExitNodeMenuDeselectAll() + if err != nil { + log.Warnf("failed to handle deselect all exit nodes: %v", err) + } else { + s.exitNodeMu.Lock() + s.recreateExitNodeMenu(exitNodes) + s.exitNodeMu.Unlock() + } + } + }() +} + func (s *serviceClient) getExitNodes(conn proto.DaemonServiceClient) ([]*proto.Network, error) { ctx, cancel := context.WithTimeout(s.ctx, defaultFailTimeout) defer cancel() diff --git a/combined/cmd/config.go b/combined/cmd/config.go index 85664d0d2..ce4df8394 100644 --- a/combined/cmd/config.go +++ b/combined/cmd/config.go @@ -179,9 +179,11 @@ type StoreConfig struct { // ReverseProxyConfig contains reverse proxy settings type ReverseProxyConfig struct { - TrustedHTTPProxies []string `yaml:"trustedHTTPProxies"` - TrustedHTTPProxiesCount uint `yaml:"trustedHTTPProxiesCount"` - TrustedPeers []string `yaml:"trustedPeers"` + TrustedHTTPProxies []string `yaml:"trustedHTTPProxies"` + TrustedHTTPProxiesCount uint `yaml:"trustedHTTPProxiesCount"` + TrustedPeers []string `yaml:"trustedPeers"` + AccessLogRetentionDays int `yaml:"accessLogRetentionDays"` + AccessLogCleanupIntervalHours int `yaml:"accessLogCleanupIntervalHours"` } // DefaultConfig returns a CombinedConfig with default values @@ -645,7 +647,9 @@ func (c *CombinedConfig) ToManagementConfig() (*nbconfig.Config, error) { // Build reverse proxy config reverseProxy := nbconfig.ReverseProxy{ - TrustedHTTPProxiesCount: mgmt.ReverseProxy.TrustedHTTPProxiesCount, + TrustedHTTPProxiesCount: mgmt.ReverseProxy.TrustedHTTPProxiesCount, + AccessLogRetentionDays: mgmt.ReverseProxy.AccessLogRetentionDays, + AccessLogCleanupIntervalHours: mgmt.ReverseProxy.AccessLogCleanupIntervalHours, } for _, p := range mgmt.ReverseProxy.TrustedHTTPProxies { if prefix, err := netip.ParsePrefix(p); err == nil { diff --git a/combined/cmd/root.go b/combined/cmd/root.go 
index ea1ff908a..db986b4d4 100644 --- a/combined/cmd/root.go +++ b/combined/cmd/root.go @@ -29,6 +29,7 @@ import ( "github.com/netbirdio/netbird/management/server/telemetry" "github.com/netbirdio/netbird/relay/healthcheck" relayServer "github.com/netbirdio/netbird/relay/server" + "github.com/netbirdio/netbird/relay/server/listener" "github.com/netbirdio/netbird/relay/server/listener/ws" sharedMetrics "github.com/netbirdio/netbird/shared/metrics" "github.com/netbirdio/netbird/shared/relay/auth" @@ -523,7 +524,7 @@ func createManagementServer(cfg *CombinedConfig, mgmtConfig *nbconfig.Config) (* func createCombinedHandler(grpcServer *grpc.Server, httpHandler http.Handler, relaySrv *relayServer.Server, meter metric.Meter, cfg *CombinedConfig) http.Handler { wsProxy := wsproxyserver.New(grpcServer, wsproxyserver.WithOTelMeter(meter)) - var relayAcceptFn func(conn net.Conn) + var relayAcceptFn func(conn listener.Conn) if relaySrv != nil { relayAcceptFn = relaySrv.RelayAccept() } @@ -563,7 +564,7 @@ func createCombinedHandler(grpcServer *grpc.Server, httpHandler http.Handler, re } // handleRelayWebSocket handles incoming WebSocket connections for the relay service -func handleRelayWebSocket(w http.ResponseWriter, r *http.Request, acceptFn func(conn net.Conn), cfg *CombinedConfig) { +func handleRelayWebSocket(w http.ResponseWriter, r *http.Request, acceptFn func(conn listener.Conn), cfg *CombinedConfig) { acceptOptions := &websocket.AcceptOptions{ OriginPatterns: []string{"*"}, } @@ -585,15 +586,9 @@ func handleRelayWebSocket(w http.ResponseWriter, r *http.Request, acceptFn func( return } - lAddr, err := net.ResolveTCPAddr("tcp", cfg.Server.ListenAddress) - if err != nil { - _ = wsConn.Close(websocket.StatusInternalError, "internal error") - return - } - log.Debugf("Relay WS client connected from: %s", rAddr) - conn := ws.NewConn(wsConn, lAddr, rAddr) + conn := ws.NewConn(wsConn, rAddr) acceptFn(conn) } diff --git a/flow/client/client.go b/flow/client/client.go index 
318fcfe1e..8ad637974 100644 --- a/flow/client/client.go +++ b/flow/client/client.go @@ -14,7 +14,6 @@ import ( log "github.com/sirupsen/logrus" "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/keepalive" @@ -26,11 +25,22 @@ import ( "github.com/netbirdio/netbird/util/wsproxy" ) +var ErrClientClosed = errors.New("client is closed") + +// minHealthyDuration is the minimum time a stream must survive before a failure +// resets the backoff timer. Streams that fail faster are considered unhealthy and +// should not reset backoff, so that MaxElapsedTime can eventually stop retries. +const minHealthyDuration = 5 * time.Second + type GRPCClient struct { realClient proto.FlowServiceClient clientConn *grpc.ClientConn stream proto.FlowService_EventsClient - streamMu sync.Mutex + target string + opts []grpc.DialOption + closed bool // prevent creating conn in the middle of the Close + receiving bool // prevent concurrent Receive calls + mu sync.Mutex // protects clientConn, realClient, stream, closed, and receiving } func NewClient(addr, payload, signature string, interval time.Duration) (*GRPCClient, error) { @@ -65,7 +75,8 @@ func NewClient(addr, payload, signature string, interval time.Duration) (*GRPCCl grpc.WithDefaultServiceConfig(`{"healthCheckConfig": {"serviceName": ""}}`), ) - conn, err := grpc.NewClient(fmt.Sprintf("%s:%s", parsedURL.Hostname(), parsedURL.Port()), opts...) + target := parsedURL.Host + conn, err := grpc.NewClient(target, opts...) 
if err != nil { return nil, fmt.Errorf("creating new grpc client: %w", err) } @@ -73,30 +84,73 @@ func NewClient(addr, payload, signature string, interval time.Duration) (*GRPCCl return &GRPCClient{ realClient: proto.NewFlowServiceClient(conn), clientConn: conn, + target: target, + opts: opts, }, nil } func (c *GRPCClient) Close() error { - c.streamMu.Lock() - defer c.streamMu.Unlock() - + c.mu.Lock() + c.closed = true c.stream = nil - if err := c.clientConn.Close(); err != nil && !errors.Is(err, context.Canceled) { + conn := c.clientConn + c.clientConn = nil + c.mu.Unlock() + + if conn == nil { + return nil + } + + if err := conn.Close(); err != nil && !errors.Is(err, context.Canceled) { return fmt.Errorf("close client connection: %w", err) } return nil } +func (c *GRPCClient) Send(event *proto.FlowEvent) error { + c.mu.Lock() + stream := c.stream + c.mu.Unlock() + + if stream == nil { + return errors.New("stream not initialized") + } + + if err := stream.Send(event); err != nil { + return fmt.Errorf("send flow event: %w", err) + } + + return nil +} + func (c *GRPCClient) Receive(ctx context.Context, interval time.Duration, msgHandler func(msg *proto.FlowEventAck) error) error { + c.mu.Lock() + if c.receiving { + c.mu.Unlock() + return errors.New("concurrent Receive calls are not supported") + } + c.receiving = true + c.mu.Unlock() + defer func() { + c.mu.Lock() + c.receiving = false + c.mu.Unlock() + }() + backOff := defaultBackoff(ctx, interval) operation := func() error { - if err := c.establishStreamAndReceive(ctx, msgHandler); err != nil { - if s, ok := status.FromError(err); ok && s.Code() == codes.Canceled { - return fmt.Errorf("receive: %w: %w", err, context.Canceled) - } + stream, err := c.establishStream(ctx) + if err != nil { + log.Errorf("failed to establish flow stream, retrying: %v", err) + return c.handleRetryableError(err, time.Time{}, backOff) + } + + streamStart := time.Now() + + if err := c.receive(stream, msgHandler); err != nil { 
log.Errorf("receive failed: %v", err) - return fmt.Errorf("receive: %w", err) + return c.handleRetryableError(err, streamStart, backOff) } return nil } @@ -108,37 +162,106 @@ func (c *GRPCClient) Receive(ctx context.Context, interval time.Duration, msgHan return nil } -func (c *GRPCClient) establishStreamAndReceive(ctx context.Context, msgHandler func(msg *proto.FlowEventAck) error) error { - if c.clientConn.GetState() == connectivity.Shutdown { - return errors.New("connection to flow receiver has been shut down") +// handleRetryableError resets the backoff timer if the stream was healthy long +// enough and recreates the underlying ClientConn so that gRPC's internal +// subchannel backoff does not accumulate and compete with our own retry timer. +// A zero streamStart means the stream was never established. +func (c *GRPCClient) handleRetryableError(err error, streamStart time.Time, backOff backoff.BackOff) error { + if isContextDone(err) { + return backoff.Permanent(err) } - stream, err := c.realClient.Events(ctx, grpc.WaitForReady(true)) - if err != nil { - return fmt.Errorf("create event stream: %w", err) + var permErr *backoff.PermanentError + if errors.As(err, &permErr) { + return err } - err = stream.Send(&proto.FlowEvent{IsInitiator: true}) + // Reset the backoff so the next retry starts with a short delay instead of + // continuing the already-elapsed timer. Only do this if the stream was healthy + // long enough; short-lived connect/drop cycles must not defeat MaxElapsedTime. 
+ if !streamStart.IsZero() && time.Since(streamStart) >= minHealthyDuration { + backOff.Reset() + } + + if recreateErr := c.recreateConnection(); recreateErr != nil { + log.Errorf("recreate connection: %v", recreateErr) + return recreateErr + } + + log.Infof("connection recreated, retrying stream") + return fmt.Errorf("retrying after error: %w", err) +} + +func (c *GRPCClient) recreateConnection() error { + c.mu.Lock() + if c.closed { + c.mu.Unlock() + return backoff.Permanent(ErrClientClosed) + } + + conn, err := grpc.NewClient(c.target, c.opts...) if err != nil { - log.Infof("failed to send initiator message to flow receiver but will attempt to continue. Error: %s", err) + c.mu.Unlock() + return fmt.Errorf("create new connection: %w", err) + } + + old := c.clientConn + c.clientConn = conn + c.realClient = proto.NewFlowServiceClient(conn) + c.stream = nil + c.mu.Unlock() + + _ = old.Close() + + return nil +} + +func (c *GRPCClient) establishStream(ctx context.Context) (proto.FlowService_EventsClient, error) { + c.mu.Lock() + if c.closed { + c.mu.Unlock() + return nil, backoff.Permanent(ErrClientClosed) + } + cl := c.realClient + c.mu.Unlock() + + // open stream outside the lock — blocking operation + stream, err := cl.Events(ctx) + if err != nil { + return nil, fmt.Errorf("create event stream: %w", err) + } + streamReady := false + defer func() { + if !streamReady { + _ = stream.CloseSend() + } + }() + + if err = stream.Send(&proto.FlowEvent{IsInitiator: true}); err != nil { + return nil, fmt.Errorf("send initiator: %w", err) } if err = checkHeader(stream); err != nil { - return fmt.Errorf("check header: %w", err) + return nil, fmt.Errorf("check header: %w", err) } - c.streamMu.Lock() + c.mu.Lock() + if c.closed { + c.mu.Unlock() + return nil, backoff.Permanent(ErrClientClosed) + } c.stream = stream - c.streamMu.Unlock() + c.mu.Unlock() + streamReady = true - return c.receive(stream, msgHandler) + return stream, nil } func (c *GRPCClient) receive(stream 
proto.FlowService_EventsClient, msgHandler func(msg *proto.FlowEventAck) error) error { for { msg, err := stream.Recv() if err != nil { - return fmt.Errorf("receive from stream: %w", err) + return err } if msg.IsInitiator { @@ -169,7 +292,7 @@ func checkHeader(stream proto.FlowService_EventsClient) error { func defaultBackoff(ctx context.Context, interval time.Duration) backoff.BackOff { return backoff.WithContext(&backoff.ExponentialBackOff{ InitialInterval: 800 * time.Millisecond, - RandomizationFactor: 1, + RandomizationFactor: 0.5, Multiplier: 1.7, MaxInterval: interval / 2, MaxElapsedTime: 3 * 30 * 24 * time.Hour, // 3 months @@ -178,18 +301,12 @@ func defaultBackoff(ctx context.Context, interval time.Duration) backoff.BackOff }, ctx) } -func (c *GRPCClient) Send(event *proto.FlowEvent) error { - c.streamMu.Lock() - stream := c.stream - c.streamMu.Unlock() - - if stream == nil { - return errors.New("stream not initialized") +func isContextDone(err error) bool { + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return true } - - if err := stream.Send(event); err != nil { - return fmt.Errorf("send flow event: %w", err) + if s, ok := status.FromError(err); ok { + return s.Code() == codes.Canceled || s.Code() == codes.DeadlineExceeded } - - return nil + return false } diff --git a/flow/client/client_test.go b/flow/client/client_test.go index efe01c003..55157acbc 100644 --- a/flow/client/client_test.go +++ b/flow/client/client_test.go @@ -2,8 +2,11 @@ package client_test import ( "context" + "encoding/binary" "errors" "net" + "sync" + "sync/atomic" "testing" "time" @@ -11,6 +14,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" flow "github.com/netbirdio/netbird/flow/client" "github.com/netbirdio/netbird/flow/proto" @@ -18,21 +23,89 @@ import ( type testServer struct { 
proto.UnimplementedFlowServiceServer - events chan *proto.FlowEvent - acks chan *proto.FlowEventAck - grpcSrv *grpc.Server - addr string + events chan *proto.FlowEvent + acks chan *proto.FlowEventAck + grpcSrv *grpc.Server + addr string + listener *connTrackListener + closeStream chan struct{} // signal server to close the stream + handlerDone chan struct{} // signaled each time Events() exits + handlerStarted chan struct{} // signaled each time Events() begins +} + +// connTrackListener wraps a net.Listener to track accepted connections +// so tests can forcefully close them to simulate PROTOCOL_ERROR/RST_STREAM. +type connTrackListener struct { + net.Listener + mu sync.Mutex + conns []net.Conn +} + +func (l *connTrackListener) Accept() (net.Conn, error) { + c, err := l.Listener.Accept() + if err != nil { + return nil, err + } + l.mu.Lock() + l.conns = append(l.conns, c) + l.mu.Unlock() + return c, nil +} + +// sendRSTStream writes a raw HTTP/2 RST_STREAM frame with PROTOCOL_ERROR +// (error code 0x1) on every tracked connection. 
This produces the exact error: +// +// rpc error: code = Internal desc = stream terminated by RST_STREAM with error code: PROTOCOL_ERROR +// +// HTTP/2 RST_STREAM frame format (9-byte header + 4-byte payload): +// +// Length (3 bytes): 0x000004 +// Type (1 byte): 0x03 (RST_STREAM) +// Flags (1 byte): 0x00 +// Stream ID (4 bytes): target stream (must have bit 31 clear) +// Error Code (4 bytes): 0x00000001 (PROTOCOL_ERROR) +func (l *connTrackListener) connCount() int { + l.mu.Lock() + defer l.mu.Unlock() + return len(l.conns) +} + +func (l *connTrackListener) sendRSTStream(streamID uint32) { + l.mu.Lock() + defer l.mu.Unlock() + + frame := make([]byte, 13) // 9-byte header + 4-byte payload + // Length = 4 (3 bytes, big-endian) + frame[0], frame[1], frame[2] = 0, 0, 4 + // Type = RST_STREAM (0x03) + frame[3] = 0x03 + // Flags = 0 + frame[4] = 0x00 + // Stream ID (4 bytes, big-endian, bit 31 reserved = 0) + binary.BigEndian.PutUint32(frame[5:9], streamID) + // Error Code = PROTOCOL_ERROR (0x1) + binary.BigEndian.PutUint32(frame[9:13], 0x1) + + for _, c := range l.conns { + _, _ = c.Write(frame) + } } func newTestServer(t *testing.T) *testServer { - listener, err := net.Listen("tcp", "127.0.0.1:0") + rawListener, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) + listener := &connTrackListener{Listener: rawListener} + s := &testServer{ - events: make(chan *proto.FlowEvent, 100), - acks: make(chan *proto.FlowEventAck, 100), - grpcSrv: grpc.NewServer(), - addr: listener.Addr().String(), + events: make(chan *proto.FlowEvent, 100), + acks: make(chan *proto.FlowEventAck, 100), + grpcSrv: grpc.NewServer(), + addr: rawListener.Addr().String(), + listener: listener, + closeStream: make(chan struct{}, 1), + handlerDone: make(chan struct{}, 10), + handlerStarted: make(chan struct{}, 10), } proto.RegisterFlowServiceServer(s.grpcSrv, s) @@ -51,11 +124,23 @@ func newTestServer(t *testing.T) *testServer { } func (s *testServer) Events(stream 
proto.FlowService_EventsServer) error { + defer func() { + select { + case s.handlerDone <- struct{}{}: + default: + } + }() + err := stream.Send(&proto.FlowEventAck{IsInitiator: true}) if err != nil { return err } + select { + case s.handlerStarted <- struct{}{}: + default: + } + ctx, cancel := context.WithCancel(stream.Context()) defer cancel() @@ -91,6 +176,8 @@ func (s *testServer) Events(stream proto.FlowService_EventsServer) error { if err := stream.Send(ack); err != nil { return err } + case <-s.closeStream: + return status.Errorf(codes.Internal, "server closing stream") case <-ctx.Done(): return ctx.Err() } @@ -110,16 +197,13 @@ func TestReceive(t *testing.T) { assert.NoError(t, err, "failed to close flow") }) - receivedAcks := make(map[string]bool) + var ackCount atomic.Int32 receiveDone := make(chan struct{}) go func() { err := client.Receive(ctx, 1*time.Second, func(msg *proto.FlowEventAck) error { if !msg.IsInitiator && len(msg.EventId) > 0 { - id := string(msg.EventId) - receivedAcks[id] = true - - if len(receivedAcks) >= 3 { + if ackCount.Add(1) >= 3 { close(receiveDone) } } @@ -130,7 +214,11 @@ func TestReceive(t *testing.T) { } }() - time.Sleep(500 * time.Millisecond) + select { + case <-server.handlerStarted: + case <-time.After(3 * time.Second): + t.Fatal("timeout waiting for stream to be established") + } for i := 0; i < 3; i++ { eventID := uuid.New().String() @@ -153,7 +241,7 @@ func TestReceive(t *testing.T) { t.Fatal("timeout waiting for acks to be processed") } - assert.Equal(t, 3, len(receivedAcks)) + assert.Equal(t, int32(3), ackCount.Load()) } func TestReceive_ContextCancellation(t *testing.T) { @@ -254,3 +342,195 @@ func TestSend(t *testing.T) { t.Fatal("timeout waiting for ack to be received by flow") } } + +func TestNewClient_PermanentClose(t *testing.T) { + server := newTestServer(t) + + client, err := flow.NewClient("http://"+server.addr, "test-payload", "test-signature", 1*time.Second) + require.NoError(t, err) + + err = 
client.Close() + require.NoError(t, err) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + t.Cleanup(cancel) + + done := make(chan error, 1) + go func() { + done <- client.Receive(ctx, 1*time.Second, func(msg *proto.FlowEventAck) error { + return nil + }) + }() + + select { + case err := <-done: + require.ErrorIs(t, err, flow.ErrClientClosed) + case <-time.After(2 * time.Second): + t.Fatal("Receive did not return after Close — stuck in retry loop") + } +} + +func TestNewClient_CloseVerify(t *testing.T) { + server := newTestServer(t) + + client, err := flow.NewClient("http://"+server.addr, "test-payload", "test-signature", 1*time.Second) + require.NoError(t, err) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + t.Cleanup(cancel) + + done := make(chan error, 1) + go func() { + done <- client.Receive(ctx, 1*time.Second, func(msg *proto.FlowEventAck) error { + return nil + }) + }() + + closeDone := make(chan struct{}, 1) + go func() { + _ = client.Close() + closeDone <- struct{}{} + }() + + select { + case err := <-done: + require.Error(t, err) + case <-time.After(2 * time.Second): + t.Fatal("Receive did not return after Close — stuck in retry loop") + } + + select { + case <-closeDone: + return + case <-time.After(2 * time.Second): + t.Fatal("Close did not return — blocked in retry loop") + } + +} + +func TestClose_WhileReceiving(t *testing.T) { + server := newTestServer(t) + client, err := flow.NewClient("http://"+server.addr, "test-payload", "test-signature", 1*time.Second) + require.NoError(t, err) + + ctx := context.Background() // no timeout — intentional + receiveDone := make(chan struct{}) + go func() { + _ = client.Receive(ctx, 1*time.Second, func(msg *proto.FlowEventAck) error { + return nil + }) + close(receiveDone) + }() + + // Wait for the server-side handler to confirm the stream is established. 
+ select { + case <-server.handlerStarted: + case <-time.After(3 * time.Second): + t.Fatal("timeout waiting for stream to be established") + } + + closeDone := make(chan struct{}) + go func() { + _ = client.Close() + close(closeDone) + }() + + select { + case <-closeDone: + // Close returned — good + case <-time.After(2 * time.Second): + t.Fatal("Close blocked forever — Receive stuck in retry loop") + } + + select { + case <-receiveDone: + case <-time.After(2 * time.Second): + t.Fatal("Receive did not exit after Close") + } +} + +func TestReceive_ProtocolErrorStreamReconnect(t *testing.T) { + server := newTestServer(t) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + t.Cleanup(cancel) + + client, err := flow.NewClient("http://"+server.addr, "test-payload", "test-signature", 1*time.Second) + require.NoError(t, err) + t.Cleanup(func() { + err := client.Close() + assert.NoError(t, err, "failed to close flow") + }) + + // Track acks received before and after server-side stream close + var ackCount atomic.Int32 + receivedFirst := make(chan struct{}) + receivedAfterReconnect := make(chan struct{}) + + go func() { + err := client.Receive(ctx, 1*time.Second, func(msg *proto.FlowEventAck) error { + if msg.IsInitiator || len(msg.EventId) == 0 { + return nil + } + n := ackCount.Add(1) + if n == 1 { + close(receivedFirst) + } + if n == 2 { + close(receivedAfterReconnect) + } + return nil + }) + if err != nil && !errors.Is(err, context.Canceled) { + t.Logf("receive error: %v", err) + } + }() + + // Wait for stream to be established, then send first ack + select { + case <-server.handlerStarted: + case <-time.After(3 * time.Second): + t.Fatal("timeout waiting for stream to be established") + } + server.acks <- &proto.FlowEventAck{EventId: []byte("before-close")} + + select { + case <-receivedFirst: + case <-time.After(3 * time.Second): + t.Fatal("timeout waiting for first ack") + } + + // Snapshot connection count before injecting the fault. 
+ connsBefore := server.listener.connCount() + + // Send a raw HTTP/2 RST_STREAM frame with PROTOCOL_ERROR on the TCP connection. + // gRPC multiplexes streams on stream IDs 1, 3, 5, ... (odd, client-initiated). + // Stream ID 1 is the client's first stream (our Events bidi stream). + // This produces the exact error the client sees in production: + // "stream terminated by RST_STREAM with error code: PROTOCOL_ERROR" + server.listener.sendRSTStream(1) + + // Wait for the old Events() handler to fully exit so it can no longer + // drain s.acks and drop our injected ack on a broken stream. + select { + case <-server.handlerDone: + case <-time.After(5 * time.Second): + t.Fatal("old Events() handler did not exit after RST_STREAM") + } + + require.Eventually(t, func() bool { + return server.listener.connCount() > connsBefore + }, 5*time.Second, 50*time.Millisecond, "client did not open a new TCP connection after RST_STREAM") + + server.acks <- &proto.FlowEventAck{EventId: []byte("after-close")} + + select { + case <-receivedAfterReconnect: + // Client successfully reconnected and received ack after server-side stream close + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for ack after server-side stream close — client did not reconnect") + } + + assert.GreaterOrEqual(t, int(ackCount.Load()), 2, "should have received acks before and after stream close") + assert.GreaterOrEqual(t, server.listener.connCount(), 2, "client should have created at least 2 TCP connections (original + reconnect)") +} diff --git a/go.mod b/go.mod index 3661d6fe0..a95192600 100644 --- a/go.mod +++ b/go.mod @@ -17,13 +17,13 @@ require ( github.com/spf13/cobra v1.10.1 github.com/spf13/pflag v1.0.9 github.com/vishvananda/netlink v1.3.1 - golang.org/x/crypto v0.46.0 - golang.org/x/sys v0.39.0 + golang.org/x/crypto v0.48.0 + golang.org/x/sys v0.41.0 golang.zx2c4.com/wireguard v0.0.0-20230704135630-469159ecf7d1 golang.zx2c4.com/wireguard/wgctrl v0.0.0-20230429144221-925a1e7659e6 
golang.zx2c4.com/wireguard/windows v0.5.3 - google.golang.org/grpc v1.77.0 - google.golang.org/protobuf v1.36.10 + google.golang.org/grpc v1.79.3 + google.golang.org/protobuf v1.36.11 gopkg.in/natefinch/lumberjack.v2 v2.0.0 ) @@ -33,6 +33,7 @@ require ( github.com/awnumar/memguard v0.23.0 github.com/aws/aws-sdk-go-v2 v1.36.3 github.com/aws/aws-sdk-go-v2/config v1.29.14 + github.com/aws/aws-sdk-go-v2/credentials v1.17.67 github.com/aws/aws-sdk-go-v2/service/s3 v1.79.2 github.com/c-robinson/iplib v1.0.3 github.com/caddyserver/certmagic v0.21.3 @@ -48,6 +49,7 @@ require ( github.com/eko/gocache/store/redis/v4 v4.2.2 github.com/fsnotify/fsnotify v1.9.0 github.com/gliderlabs/ssh v0.3.8 + github.com/go-jose/go-jose/v4 v4.1.3 github.com/godbus/dbus/v5 v5.1.0 github.com/golang-jwt/jwt/v5 v5.3.0 github.com/golang/mock v1.6.0 @@ -61,6 +63,7 @@ require ( github.com/hashicorp/go-version v1.6.0 github.com/jackc/pgx/v5 v5.5.5 github.com/libdns/route53 v1.5.0 + github.com/libp2p/go-nat v0.2.0 github.com/libp2p/go-netroute v0.2.1 github.com/lrh3321/ipset-go v0.0.0-20250619021614-54a0a98ace81 github.com/mdlayher/socket v0.5.1 @@ -100,21 +103,21 @@ require ( github.com/vmihailenco/msgpack/v5 v5.4.1 github.com/yusufpapurcu/wmi v1.2.4 github.com/zcalusic/sysinfo v1.1.3 - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 - go.opentelemetry.io/otel v1.38.0 - go.opentelemetry.io/otel/exporters/prometheus v0.48.0 - go.opentelemetry.io/otel/metric v1.38.0 - go.opentelemetry.io/otel/sdk/metric v1.38.0 + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0 + go.opentelemetry.io/otel v1.42.0 + go.opentelemetry.io/otel/exporters/prometheus v0.64.0 + go.opentelemetry.io/otel/metric v1.42.0 + go.opentelemetry.io/otel/sdk/metric v1.42.0 go.uber.org/mock v0.5.2 go.uber.org/zap v1.27.0 goauthentik.io/api/v3 v3.2023051.3 golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 golang.org/x/mobile v0.0.0-20251113184115-a159579294ab - 
golang.org/x/mod v0.30.0 - golang.org/x/net v0.47.0 + golang.org/x/mod v0.32.0 + golang.org/x/net v0.51.0 golang.org/x/oauth2 v0.34.0 golang.org/x/sync v0.19.0 - golang.org/x/term v0.38.0 + golang.org/x/term v0.40.0 golang.org/x/time v0.14.0 google.golang.org/api v0.257.0 gopkg.in/yaml.v3 v3.0.1 @@ -143,7 +146,6 @@ require ( github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect github.com/awnumar/memcall v0.4.0 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.67 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect @@ -181,7 +183,6 @@ require ( github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 // indirect github.com/go-gl/gl v0.0.0-20231021071112-07e5d0ea2e71 // indirect github.com/go-gl/glfw/v3.3/glfw v0.0.0-20240506104042-037f3cc74f2a // indirect - github.com/go-jose/go-jose/v4 v4.1.3 // indirect github.com/go-ldap/ldap/v3 v3.4.12 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -200,10 +201,12 @@ require ( github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/huandu/xstrings v1.5.0 // indirect + github.com/huin/goupnp v1.2.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect github.com/jackc/puddle/v2 v2.2.1 // indirect + github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jeandeaual/go-locale v0.0.0-20250612000132-0ef82f21eade // indirect github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/now v1.1.5 // indirect @@ -213,6 +216,7 @@ require ( github.com/kelseyhightower/envconfig v1.4.0 // indirect github.com/klauspost/compress 
v1.18.0 // indirect github.com/klauspost/cpuid/v2 v2.2.7 // indirect + github.com/koron/go-ssdp v0.0.4 // indirect github.com/kr/fs v0.1.0 // indirect github.com/lib/pq v1.10.9 // indirect github.com/libdns/libdns v0.2.2 // indirect @@ -249,12 +253,13 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/prometheus/client_model v0.6.2 // indirect - github.com/prometheus/common v0.66.1 // indirect - github.com/prometheus/procfs v0.16.1 // indirect + github.com/prometheus/common v0.67.5 // indirect + github.com/prometheus/otlptranslator v1.0.0 // indirect + github.com/prometheus/procfs v0.19.2 // indirect github.com/russellhaering/goxmldsig v1.5.0 // indirect github.com/rymdport/portal v0.4.2 // indirect github.com/shirou/gopsutil/v4 v4.25.1 // indirect - github.com/shoenig/go-m1cpu v0.2.0 // indirect + github.com/shoenig/go-m1cpu v0.2.1 // indirect github.com/shopspring/decimal v1.4.0 // indirect github.com/spf13/cast v1.7.0 // indirect github.com/srwiley/oksvg v0.0.0-20221011165216-be6e8873101c // indirect @@ -269,15 +274,15 @@ require ( github.com/zeebo/blake3 v0.2.3 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect - go.opentelemetry.io/otel/sdk v1.38.0 // indirect - go.opentelemetry.io/otel/trace v1.38.0 // indirect + go.opentelemetry.io/otel/sdk v1.42.0 // indirect + go.opentelemetry.io/otel/trace v1.42.0 // indirect go.uber.org/multierr v1.11.0 // indirect - go.yaml.in/yaml/v2 v2.4.2 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect golang.org/x/image v0.33.0 // indirect - golang.org/x/text v0.32.0 // indirect - golang.org/x/tools v0.39.0 // indirect + golang.org/x/text v0.34.0 // indirect + golang.org/x/tools v0.41.0 // indirect golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 // indirect - google.golang.org/genproto/googleapis/rpc 
v0.0.0-20251124214823-79d6a2a48846 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260226221140-a57be14db171 // indirect gopkg.in/square/go-jose.v2 v2.6.0 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect ) diff --git a/go.sum b/go.sum index 57e07dd7b..a1d2bb71f 100644 --- a/go.sum +++ b/go.sum @@ -281,6 +281,8 @@ github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huin/goupnp v1.2.0 h1:uOKW26NG1hsSSbXIZ1IR7XP9Gjd1U8pnLaCMgntmkmY= +github.com/huin/goupnp v1.2.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= @@ -291,6 +293,8 @@ github.com/jackc/pgx/v5 v5.5.5 h1:amBjrZVmksIdNjxGW/IiIMzxMKZFelXbUoPNb+8sjQw= github.com/jackc/pgx/v5 v5.5.5/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A= github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= +github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= @@ -328,6 +332,8 @@ github.com/klauspost/compress 
v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYW github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM= github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= +github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk= github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -346,6 +352,8 @@ github.com/libdns/libdns v0.2.2 h1:O6ws7bAfRPaBsgAYt8MDe2HcNBGC29hkZ9MX2eUSX3s= github.com/libdns/libdns v0.2.2/go.mod h1:4Bj9+5CQiNMVGf87wjX4CY3HQJypUHRuLvlsfsZqLWQ= github.com/libdns/route53 v1.5.0 h1:2SKdpPFl/qgWsXQvsLNJJAoX7rSxlk7zgoL4jnWdXVA= github.com/libdns/route53 v1.5.0/go.mod h1:joT4hKmaTNKHEwb7GmZ65eoDz1whTu7KKYPS8ZqIh6Q= +github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk= +github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk= github.com/lrh3321/ipset-go v0.0.0-20250619021614-54a0a98ace81 h1:J56rFEfUTFT9j9CiRXhi1r8lUJ4W5idG3CiaBZGojNU= github.com/lrh3321/ipset-go v0.0.0-20250619021614-54a0a98ace81/go.mod h1:RD8ML/YdXctQ7qbcizZkw5mZ6l8Ogrl1dodBzVJduwI= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= @@ -487,10 +495,12 @@ github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= 
-github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= -github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= -github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= -github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= +github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4= +github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw= +github.com/prometheus/otlptranslator v1.0.0 h1:s0LJW/iN9dkIH+EnhiD3BlkkP5QVIUVEoIwkU+A6qos= +github.com/prometheus/otlptranslator v1.0.0/go.mod h1:vRYWnXvI6aWGpsdY/mOT/cbeVRBlPWtBNDb7kGR3uKM= +github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws= +github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw= github.com/quic-go/quic-go v0.55.0 h1:zccPQIqYCXDt5NmcEabyYvOnomjs8Tlwl7tISjJh9Mk= github.com/quic-go/quic-go v0.55.0/go.mod h1:DR51ilwU1uE164KuWXhinFcKWGlEjzys2l8zUl5Ss1U= github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM= @@ -511,8 +521,8 @@ github.com/shirou/gopsutil/v3 v3.24.4/go.mod h1:lTd2mdiOspcqLgAnr9/nGi71NkeMpWKd github.com/shirou/gopsutil/v4 v4.25.1 h1:QSWkTc+fu9LTAWfkZwZ6j8MSUk4A2LV7rbH0ZqmLjXs= github.com/shirou/gopsutil/v4 v4.25.1/go.mod h1:RoUCUpndaJFtT+2zsZzzmhvbfGoDCJ7nFXKJf8GqJbI= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= -github.com/shoenig/go-m1cpu v0.2.0 h1:t4GNqvPZ84Vjtpboo/kT3pIkbaK3vc+JIlD/Wz1zSFY= -github.com/shoenig/go-m1cpu v0.2.0/go.mod h1:KkDOw6m3ZJQAPHbrzkZki4hnx+pDRR1Lo+ldA56wD5w= +github.com/shoenig/go-m1cpu v0.2.1 h1:yqRB4fvOge2+FyRXFkXqsyMoqPazv14Yyy+iyccT2E4= +github.com/shoenig/go-m1cpu v0.2.1/go.mod h1:KkDOw6m3ZJQAPHbrzkZki4hnx+pDRR1Lo+ldA56wD5w= github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/shoenig/test 
v1.7.0 h1:eWcHtTXa6QLnBvm0jgEabMRN/uJ4DMV3M8xUGgRkZmk= github.com/shoenig/test v1.7.0/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI= @@ -603,26 +613,26 @@ github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo= github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0 h1:yI1/OhfEPy7J9eoa6Sj051C7n5dvpj0QX8g4sRchg04= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0/go.mod h1:NoUCKYWK+3ecatC4HjkRktREheMeEtrXoQxrqYFeHSc= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= -go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= -go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel v1.42.0 h1:lSQGzTgVR3+sgJDAU/7/ZMjN9Z+vUip7leaqBKy4sho= +go.opentelemetry.io/otel v1.42.0/go.mod h1:lJNsdRMxCUIWuMlVJWzecSMuNjE7dOYyWlqOXWkdqCc= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg= 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= -go.opentelemetry.io/otel/exporters/prometheus v0.48.0 h1:sBQe3VNGUjY9IKWQC6z2lNqa5iGbDSxhs60ABwK4y0s= -go.opentelemetry.io/otel/exporters/prometheus v0.48.0/go.mod h1:DtrbMzoZWwQHyrQmCfLam5DZbnmorsGbOtTbYHycU5o= -go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= -go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= -go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= -go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= -go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= -go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= -go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= -go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.opentelemetry.io/otel/exporters/prometheus v0.64.0 h1:g0LRDXMX/G1SEZtK8zl8Chm4K6GBwRkjPKE36LxiTYs= +go.opentelemetry.io/otel/exporters/prometheus v0.64.0/go.mod h1:UrgcjnarfdlBDP3GjDIJWe6HTprwSazNjwsI+Ru6hro= +go.opentelemetry.io/otel/metric v1.42.0 h1:2jXG+3oZLNXEPfNmnpxKDeZsFI5o4J+nz6xUlaFdF/4= +go.opentelemetry.io/otel/metric v1.42.0/go.mod h1:RlUN/7vTU7Ao/diDkEpQpnz3/92J9ko05BIwxYa2SSI= +go.opentelemetry.io/otel/sdk v1.42.0 h1:LyC8+jqk6UJwdrI/8VydAq/hvkFKNHZVIWuslJXYsDo= +go.opentelemetry.io/otel/sdk v1.42.0/go.mod h1:rGHCAxd9DAph0joO4W6OPwxjNTYWghRWmkHuGbayMts= +go.opentelemetry.io/otel/sdk/metric v1.42.0 h1:D/1QR46Clz6ajyZ3G8SgNlTJKBdGp84q9RKCAZ3YGuA= +go.opentelemetry.io/otel/sdk/metric v1.42.0/go.mod h1:Ua6AAlDKdZ7tdvaQKfSmnFTdHx37+J4ba8MwVCYM5hc= +go.opentelemetry.io/otel/trace v1.42.0 h1:OUCgIPt+mzOnaUTpOQcBiM/PLQ/Op7oq6g4LenLmOYY= +go.opentelemetry.io/otel/trace v1.42.0/go.mod 
h1:f3K9S+IFqnumBkKhRJMeaZeNk9epyhnCmQh/EysQCdc= go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -633,8 +643,8 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= -go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= goauthentik.io/api/v3 v3.2023051.3 h1:NebAhD/TeTWNo/9X3/Uj+rM5fG1HaiLOlKTNLQv9Qq4= goauthentik.io/api/v3 v3.2023051.3/go.mod h1:nYECml4jGbp/541hj8GcylKQG1gVBsKppHy4+7G8u4U= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -648,8 +658,8 @@ golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1m golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= -golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= -golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= +golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts= +golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos= golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 
h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= golang.org/x/image v0.33.0 h1:LXRZRnv1+zGd5XBUVRFmYEphyyKJjQjCRiOuAP3sZfQ= @@ -666,8 +676,8 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= -golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= +golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c= +golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= @@ -686,8 +696,8 @@ golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= -golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/net v0.51.0 h1:94R/GTO7mt3/4wIKpcR5gkGmRLOuE/2hNGeWq/GBIFo= +golang.org/x/net v0.51.0/go.mod h1:aamm+2QF5ogm02fjy5Bb7CQ0WMt1/WVM7FtyaTLlA9Y= golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= golang.org/x/oauth2 v0.34.0 
h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw= golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= @@ -738,8 +748,8 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= -golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k= +golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -752,8 +762,8 @@ golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= -golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= -golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= +golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg= +golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -765,8 +775,8 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= -golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= +golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk= +golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA= golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -780,8 +790,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= -golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= -golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= +golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc= +golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ 
-799,12 +809,12 @@ google.golang.org/api v0.257.0/go.mod h1:4eJrr+vbVaZSqs7vovFd1Jb/A6ml6iw2e6FBYf3 google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20250603155806-513f23925822 h1:rHWScKit0gvAPuOnu87KpaYtjK5zBMLcULh7gxkCXu4= google.golang.org/genproto v0.0.0-20250603155806-513f23925822/go.mod h1:HubltRL7rMh0LfnQPkMH4NPDFEWp0jw3vixw7jEM53s= -google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4= -google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251124214823-79d6a2a48846 h1:Wgl1rcDNThT+Zn47YyCXOXyX/COgMTIdhJ717F0l4xk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251124214823-79d6a2a48846/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= -google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= -google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= +google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 h1:fCvbg86sFXwdrl5LgVcTEvNC+2txB5mgROGmRL5mrls= +google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260226221140-a57be14db171 h1:ggcbiqK8WWh6l1dnltU4BgWGIGo+EVYxCaAPih/zQXQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260226221140-a57be14db171/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= +google.golang.org/grpc v1.79.3 h1:sybAEdRIEtvcD68Gx7dmnwjZKlyfuc61Dyo9pGXXkKE= +google.golang.org/grpc v1.79.3/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod 
h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -815,8 +825,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= -google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/idp/dex/config.go b/idp/dex/config.go index 3db04a4cb..7f5300f14 100644 --- a/idp/dex/config.go +++ b/idp/dex/config.go @@ -170,20 +170,66 @@ type Connector struct { } // ToStorageConnector converts a Connector to storage.Connector type. +// It maps custom connector types (e.g., "zitadel", "entra") to Dex-native types +// and augments the config with OIDC defaults when needed. 
func (c *Connector) ToStorageConnector() (storage.Connector, error) { - data, err := json.Marshal(c.Config) + dexType, augmentedConfig := mapConnectorToDex(c.Type, c.Config) + + data, err := json.Marshal(augmentedConfig) if err != nil { return storage.Connector{}, fmt.Errorf("failed to marshal connector config: %v", err) } return storage.Connector{ ID: c.ID, - Type: c.Type, + Type: dexType, Name: c.Name, Config: data, }, nil } +// mapConnectorToDex maps custom connector types to Dex-native types and applies +// OIDC defaults. This ensures static connectors from config files or env vars +// are stored with types that Dex can open. +func mapConnectorToDex(connType string, config map[string]interface{}) (string, map[string]interface{}) { + switch connType { + case "oidc", "zitadel", "entra", "okta", "pocketid", "authentik", "keycloak": + return "oidc", applyOIDCDefaults(connType, config) + default: + return connType, config + } +} + +// applyOIDCDefaults clones the config map, sets common OIDC defaults, +// and applies provider-specific overrides. +func applyOIDCDefaults(connType string, config map[string]interface{}) map[string]interface{} { + augmented := make(map[string]interface{}, len(config)+4) + for k, v := range config { + augmented[k] = v + } + setDefault(augmented, "scopes", []string{"openid", "profile", "email"}) + setDefault(augmented, "insecureEnableGroups", true) + setDefault(augmented, "insecureSkipEmailVerified", true) + + switch connType { + case "zitadel": + setDefault(augmented, "getUserInfo", true) + case "entra": + setDefault(augmented, "claimMapping", map[string]string{"email": "preferred_username"}) + case "okta", "pocketid": + augmented["scopes"] = []string{"openid", "profile", "email", "groups"} + } + + return augmented +} + +// setDefault sets a key in the map only if it doesn't already exist. 
+func setDefault(m map[string]interface{}, key string, value interface{}) { + if _, ok := m[key]; !ok { + m[key] = value + } +} + // StorageConfig is a configuration that can create a storage. type StorageConfig interface { Open(logger *slog.Logger) (storage.Storage, error) diff --git a/idp/dex/provider.go b/idp/dex/provider.go index 68fe48486..24aed1b99 100644 --- a/idp/dex/provider.go +++ b/idp/dex/provider.go @@ -4,6 +4,7 @@ package dex import ( "context" "encoding/base64" + "encoding/json" "errors" "fmt" "log/slog" @@ -19,10 +20,13 @@ import ( "github.com/dexidp/dex/server" "github.com/dexidp/dex/storage" "github.com/dexidp/dex/storage/sql" + jose "github.com/go-jose/go-jose/v4" "github.com/google/uuid" "github.com/prometheus/client_golang/prometheus" "golang.org/x/crypto/bcrypt" "google.golang.org/grpc" + + nbjwt "github.com/netbirdio/netbird/shared/auth/jwt" ) // Config matches what management/internals/server/server.go expects @@ -666,3 +670,46 @@ func (p *Provider) GetAuthorizationEndpoint() string { } return issuer + "/auth" } + +// GetJWKS reads signing keys directly from Dex storage and returns them as Jwks. +// This avoids HTTP round-trips when the embedded IDP is co-located with the management server. +// The key retrieval mirrors Dex's own handlePublicKeys/ValidationKeys logic: +// SigningKeyPub first, then all VerificationKeys, serialized via go-jose. +func (p *Provider) GetJWKS(ctx context.Context) (*nbjwt.Jwks, error) { + keys, err := p.storage.GetKeys(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get keys from storage: %w", err) + } + + if keys.SigningKeyPub == nil { + return nil, fmt.Errorf("no public keys found in storage") + } + + // Build the key set exactly as Dex's localSigner.ValidationKeys does: + // signing key first, then all verification (rotated) keys. 
+ joseKeys := make([]jose.JSONWebKey, 0, len(keys.VerificationKeys)+1) + joseKeys = append(joseKeys, *keys.SigningKeyPub) + for _, vk := range keys.VerificationKeys { + if vk.PublicKey != nil { + joseKeys = append(joseKeys, *vk.PublicKey) + } + } + + // Serialize through go-jose (same as Dex's handlePublicKeys handler) + // then deserialize into our Jwks type, so the JSON field mapping is identical + // to what the /keys HTTP endpoint would return. + joseSet := jose.JSONWebKeySet{Keys: joseKeys} + data, err := json.Marshal(joseSet) + if err != nil { + return nil, fmt.Errorf("failed to marshal JWKS: %w", err) + } + + jwks := &nbjwt.Jwks{} + if err := json.Unmarshal(data, jwks); err != nil { + return nil, fmt.Errorf("failed to unmarshal JWKS: %w", err) + } + + jwks.ExpiresInTime = keys.NextRotation + + return jwks, nil +} diff --git a/idp/dex/provider_test.go b/idp/dex/provider_test.go index bd2f676fb..4ed89fd2e 100644 --- a/idp/dex/provider_test.go +++ b/idp/dex/provider_test.go @@ -2,11 +2,14 @@ package dex import ( "context" + "encoding/json" "log/slog" "os" "path/filepath" "testing" + "github.com/dexidp/dex/storage" + sqllib "github.com/dexidp/dex/storage/sql" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -197,6 +200,295 @@ enablePasswordDB: true t.Logf("User lookup successful: rawID=%s, connectorID=%s", rawID, connID) } +// openTestStorage creates a SQLite storage in the given directory for testing. 
+func openTestStorage(t *testing.T, tmpDir string) storage.Storage { + t.Helper() + logger := slog.New(slog.NewTextHandler(os.Stderr, nil)) + stor, err := (&sqllib.SQLite3{File: filepath.Join(tmpDir, "dex.db")}).Open(logger) + require.NoError(t, err) + return stor +} + +func TestStaticConnectors_CreatedFromYAML(t *testing.T) { + ctx := context.Background() + + tmpDir, err := os.MkdirTemp("", "dex-static-conn-*") + require.NoError(t, err) + defer os.RemoveAll(tmpDir) + + yamlContent := ` +issuer: http://localhost:5556/dex +storage: + type: sqlite3 + config: + file: ` + filepath.Join(tmpDir, "dex.db") + ` +web: + http: 127.0.0.1:5556 +enablePasswordDB: true +connectors: +- type: oidc + id: my-oidc + name: My OIDC Provider + config: + issuer: https://accounts.example.com + clientID: test-client-id + clientSecret: test-client-secret + redirectURI: http://localhost:5556/dex/callback +` + configPath := filepath.Join(tmpDir, "config.yaml") + err = os.WriteFile(configPath, []byte(yamlContent), 0644) + require.NoError(t, err) + + yamlConfig, err := LoadConfig(configPath) + require.NoError(t, err) + + // Open storage and run initializeStorage directly (avoids Dex server + // trying to dial the OIDC issuer) + stor := openTestStorage(t, tmpDir) + defer stor.Close() + + err = initializeStorage(ctx, stor, yamlConfig) + require.NoError(t, err) + + // Verify connector was created in storage + conn, err := stor.GetConnector(ctx, "my-oidc") + require.NoError(t, err) + assert.Equal(t, "my-oidc", conn.ID) + assert.Equal(t, "My OIDC Provider", conn.Name) + assert.Equal(t, "oidc", conn.Type) + + // Verify config fields were serialized correctly + var configMap map[string]interface{} + err = json.Unmarshal(conn.Config, &configMap) + require.NoError(t, err) + assert.Equal(t, "https://accounts.example.com", configMap["issuer"]) + assert.Equal(t, "test-client-id", configMap["clientID"]) +} + +func TestStaticConnectors_UpdatedOnRestart(t *testing.T) { + ctx := context.Background() + + 
tmpDir, err := os.MkdirTemp("", "dex-static-conn-update-*") + require.NoError(t, err) + defer os.RemoveAll(tmpDir) + + dbFile := filepath.Join(tmpDir, "dex.db") + + // First: load config with initial connector + yamlContent1 := ` +issuer: http://localhost:5556/dex +storage: + type: sqlite3 + config: + file: ` + dbFile + ` +web: + http: 127.0.0.1:5556 +enablePasswordDB: true +connectors: +- type: oidc + id: my-oidc + name: Original Name + config: + issuer: https://accounts.example.com + clientID: original-client-id + clientSecret: original-secret +` + configPath := filepath.Join(tmpDir, "config.yaml") + err = os.WriteFile(configPath, []byte(yamlContent1), 0644) + require.NoError(t, err) + + yamlConfig1, err := LoadConfig(configPath) + require.NoError(t, err) + + stor := openTestStorage(t, tmpDir) + err = initializeStorage(ctx, stor, yamlConfig1) + require.NoError(t, err) + + // Verify initial state + conn, err := stor.GetConnector(ctx, "my-oidc") + require.NoError(t, err) + assert.Equal(t, "Original Name", conn.Name) + + var configMap1 map[string]interface{} + err = json.Unmarshal(conn.Config, &configMap1) + require.NoError(t, err) + assert.Equal(t, "original-client-id", configMap1["clientID"]) + + // Close storage to simulate restart + stor.Close() + + // Second: load updated config against the same DB + yamlContent2 := ` +issuer: http://localhost:5556/dex +storage: + type: sqlite3 + config: + file: ` + dbFile + ` +web: + http: 127.0.0.1:5556 +enablePasswordDB: true +connectors: +- type: oidc + id: my-oidc + name: Updated Name + config: + issuer: https://accounts.example.com + clientID: updated-client-id + clientSecret: updated-secret +` + err = os.WriteFile(configPath, []byte(yamlContent2), 0644) + require.NoError(t, err) + + yamlConfig2, err := LoadConfig(configPath) + require.NoError(t, err) + + stor2 := openTestStorage(t, tmpDir) + defer stor2.Close() + + err = initializeStorage(ctx, stor2, yamlConfig2) + require.NoError(t, err) + + // Verify connector was 
updated, not duplicated + allConnectors, err := stor2.ListConnectors(ctx) + require.NoError(t, err) + + nonLocalCount := 0 + for _, c := range allConnectors { + if c.ID != "local" { + nonLocalCount++ + } + } + assert.Equal(t, 1, nonLocalCount, "connector should be updated, not duplicated") + + conn2, err := stor2.GetConnector(ctx, "my-oidc") + require.NoError(t, err) + assert.Equal(t, "Updated Name", conn2.Name) + + var configMap2 map[string]interface{} + err = json.Unmarshal(conn2.Config, &configMap2) + require.NoError(t, err) + assert.Equal(t, "updated-client-id", configMap2["clientID"]) +} + +func TestStaticConnectors_MultipleConnectors(t *testing.T) { + ctx := context.Background() + + tmpDir, err := os.MkdirTemp("", "dex-static-conn-multi-*") + require.NoError(t, err) + defer os.RemoveAll(tmpDir) + + yamlContent := ` +issuer: http://localhost:5556/dex +storage: + type: sqlite3 + config: + file: ` + filepath.Join(tmpDir, "dex.db") + ` +web: + http: 127.0.0.1:5556 +enablePasswordDB: true +connectors: +- type: oidc + id: my-oidc + name: My OIDC Provider + config: + issuer: https://accounts.example.com + clientID: oidc-client-id + clientSecret: oidc-secret +- type: google + id: my-google + name: Google Login + config: + clientID: google-client-id + clientSecret: google-secret +` + configPath := filepath.Join(tmpDir, "config.yaml") + err = os.WriteFile(configPath, []byte(yamlContent), 0644) + require.NoError(t, err) + + yamlConfig, err := LoadConfig(configPath) + require.NoError(t, err) + + stor := openTestStorage(t, tmpDir) + defer stor.Close() + + err = initializeStorage(ctx, stor, yamlConfig) + require.NoError(t, err) + + allConnectors, err := stor.ListConnectors(ctx) + require.NoError(t, err) + + // Build a map for easier assertion + connByID := make(map[string]storage.Connector) + for _, c := range allConnectors { + connByID[c.ID] = c + } + + // Verify both static connectors exist + oidcConn, ok := connByID["my-oidc"] + require.True(t, ok, "oidc connector 
should exist") + assert.Equal(t, "My OIDC Provider", oidcConn.Name) + assert.Equal(t, "oidc", oidcConn.Type) + + var oidcConfig map[string]interface{} + err = json.Unmarshal(oidcConn.Config, &oidcConfig) + require.NoError(t, err) + assert.Equal(t, "oidc-client-id", oidcConfig["clientID"]) + + googleConn, ok := connByID["my-google"] + require.True(t, ok, "google connector should exist") + assert.Equal(t, "Google Login", googleConn.Name) + assert.Equal(t, "google", googleConn.Type) + + var googleConfig map[string]interface{} + err = json.Unmarshal(googleConn.Config, &googleConfig) + require.NoError(t, err) + assert.Equal(t, "google-client-id", googleConfig["clientID"]) + + // Verify local connector still exists alongside them (enablePasswordDB: true) + localConn, ok := connByID["local"] + require.True(t, ok, "local connector should exist") + assert.Equal(t, "local", localConn.Type) +} + +func TestStaticConnectors_EmptyList(t *testing.T) { + ctx := context.Background() + + tmpDir, err := os.MkdirTemp("", "dex-static-conn-empty-*") + require.NoError(t, err) + defer os.RemoveAll(tmpDir) + + yamlContent := ` +issuer: http://localhost:5556/dex +storage: + type: sqlite3 + config: + file: ` + filepath.Join(tmpDir, "dex.db") + ` +web: + http: 127.0.0.1:5556 +enablePasswordDB: true +` + configPath := filepath.Join(tmpDir, "config.yaml") + err = os.WriteFile(configPath, []byte(yamlContent), 0644) + require.NoError(t, err) + + yamlConfig, err := LoadConfig(configPath) + require.NoError(t, err) + + provider, err := NewProviderFromYAML(ctx, yamlConfig) + require.NoError(t, err) + defer func() { _ = provider.Stop(ctx) }() + + // No static connectors configured, so ListConnectors should return empty + connectors, err := provider.ListConnectors(ctx) + require.NoError(t, err) + assert.Empty(t, connectors) + + // But local connector should still exist + localConn, err := provider.Storage().GetConnector(ctx, "local") + require.NoError(t, err) + assert.Equal(t, "local", localConn.ID) +} 
+ func TestNewProvider_ContinueOnConnectorFailure(t *testing.T) { ctx := context.Background() diff --git a/infrastructure_files/getting-started-with-dex.sh b/infrastructure_files/getting-started-with-dex.sh index a14c6134e..5e605f19c 100755 --- a/infrastructure_files/getting-started-with-dex.sh +++ b/infrastructure_files/getting-started-with-dex.sh @@ -172,8 +172,11 @@ init_environment() { echo "You can access the NetBird dashboard at $NETBIRD_HTTP_PROTOCOL://$NETBIRD_DOMAIN" echo "" echo "Login with the following credentials:" - echo "Email: admin@$NETBIRD_DOMAIN" | tee .env - echo "Password: $NETBIRD_ADMIN_PASSWORD" | tee -a .env + install -m 600 /dev/null .env + printf 'Email: admin@%s\nPassword: %s\n' \ + "$NETBIRD_DOMAIN" "$NETBIRD_ADMIN_PASSWORD" >> .env + echo "Email: admin@$NETBIRD_DOMAIN" + echo "Password: $NETBIRD_ADMIN_PASSWORD" echo "" echo "Dex admin UI is not available (Dex has no built-in UI)." echo "To add more users, edit dex.yaml and restart: $DOCKER_COMPOSE_COMMAND restart dex" diff --git a/infrastructure_files/getting-started-with-zitadel.sh b/infrastructure_files/getting-started-with-zitadel.sh index 09c5225ad..f503cbeac 100644 --- a/infrastructure_files/getting-started-with-zitadel.sh +++ b/infrastructure_files/getting-started-with-zitadel.sh @@ -563,8 +563,11 @@ initEnvironment() { echo -e "\nDone!\n" echo "You can access the NetBird dashboard at $NETBIRD_HTTP_PROTOCOL://$NETBIRD_DOMAIN" echo "Login with the following credentials:" - echo "Username: $ZITADEL_ADMIN_USERNAME" | tee .env - echo "Password: $ZITADEL_ADMIN_PASSWORD" | tee -a .env + install -m 600 /dev/null .env + printf 'Username: %s\nPassword: %s\n' \ + "$ZITADEL_ADMIN_USERNAME" "$ZITADEL_ADMIN_PASSWORD" >> .env + echo "Username: $ZITADEL_ADMIN_USERNAME" + echo "Password: $ZITADEL_ADMIN_PASSWORD" } renderCaddyfile() { diff --git a/infrastructure_files/getting-started.sh b/infrastructure_files/getting-started.sh index 70088d66a..9236d851d 100755 --- 
a/infrastructure_files/getting-started.sh +++ b/infrastructure_files/getting-started.sh @@ -1154,7 +1154,16 @@ print_builtin_traefik_instructions() { echo " - $NETBIRD_STUN_PORT/udp (STUN - required for NAT traversal)" if [[ "$ENABLE_PROXY" == "true" ]]; then echo " - 51820/udp (WIREGUARD - (optional) for P2P proxy connections)" - echo "" + fi + echo "" + echo "This setup is ideal for homelabs and smaller organization deployments." + echo "For enterprise environments requiring high availability and advanced integrations," + echo "consider a commercial on-prem license or scaling your open source deployment:" + echo "" + echo " Commercial license: https://netbird.io/pricing#on-prem" + echo " Scaling guide: https://docs.netbird.io/scaling-your-self-hosted-deployment" + echo "" + if [[ "$ENABLE_PROXY" == "true" ]]; then echo "NetBird Proxy:" echo " The proxy service is enabled and running." echo " Any domain NOT matching $NETBIRD_DOMAIN will be passed through to the proxy." diff --git a/infrastructure_files/observability/grafana/dashboards/management.json b/infrastructure_files/observability/grafana/dashboards/management.json index 95983603f..f116a8bde 100644 --- a/infrastructure_files/observability/grafana/dashboards/management.json +++ b/infrastructure_files/observability/grafana/dashboards/management.json @@ -302,7 +302,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "rate(management_account_peer_meta_update_counter_ratio_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])", + "expr": "rate(management_account_peer_meta_update_counter_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])", "instant": false, "legendFormat": "{{cluster}}/{{environment}}/{{job}}", "range": true, @@ -410,7 +410,7 @@ }, "disableTextWrap": false, "editorMode": "code", - "expr": 
"histogram_quantile(0.5,sum(increase(management_account_get_peer_network_map_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))", + "expr": "histogram_quantile(0.5,sum(increase(management_account_get_peer_network_map_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))", "format": "heatmap", "fullMetaSearch": false, "includeNullMetadata": true, @@ -426,7 +426,7 @@ }, "disableTextWrap": false, "editorMode": "code", - "expr": "histogram_quantile(0.9,sum(increase(management_account_get_peer_network_map_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))", + "expr": "histogram_quantile(0.9,sum(increase(management_account_get_peer_network_map_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))", "format": "heatmap", "fullMetaSearch": false, "hide": false, @@ -443,7 +443,7 @@ }, "disableTextWrap": false, "editorMode": "code", - "expr": "histogram_quantile(0.99,sum(increase(management_account_get_peer_network_map_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))", + "expr": "histogram_quantile(0.99,sum(increase(management_account_get_peer_network_map_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))", "format": "heatmap", "fullMetaSearch": false, "hide": false, @@ -545,7 +545,7 @@ }, "disableTextWrap": false, "editorMode": "code", - "expr": 
"histogram_quantile(0.5,sum(increase(management_account_update_account_peers_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))", + "expr": "histogram_quantile(0.5,sum(increase(management_account_update_account_peers_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))", "format": "heatmap", "fullMetaSearch": false, "includeNullMetadata": true, @@ -561,7 +561,7 @@ }, "disableTextWrap": false, "editorMode": "code", - "expr": "histogram_quantile(0.9,sum(increase(management_account_update_account_peers_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))", + "expr": "histogram_quantile(0.9,sum(increase(management_account_update_account_peers_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))", "format": "heatmap", "fullMetaSearch": false, "hide": false, @@ -578,7 +578,7 @@ }, "disableTextWrap": false, "editorMode": "code", - "expr": "histogram_quantile(0.99,sum(increase(management_account_update_account_peers_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))", + "expr": "histogram_quantile(0.99,sum(increase(management_account_update_account_peers_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))", "format": "heatmap", "fullMetaSearch": false, "hide": false, @@ -694,7 +694,7 @@ }, "disableTextWrap": false, "editorMode": "code", - "expr": 
"histogram_quantile(0.5,sum(increase(management_grpc_updatechannel_queue_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))", + "expr": "histogram_quantile(0.5,sum(increase(management_grpc_updatechannel_queue_length_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))", "format": "heatmap", "fullMetaSearch": false, "includeNullMetadata": true, @@ -710,7 +710,7 @@ }, "disableTextWrap": false, "editorMode": "code", - "expr": "histogram_quantile(0.9,sum(increase(management_grpc_updatechannel_queue_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))", + "expr": "histogram_quantile(0.9,sum(increase(management_grpc_updatechannel_queue_length_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))", "format": "heatmap", "fullMetaSearch": false, "hide": false, @@ -727,7 +727,7 @@ }, "disableTextWrap": false, "editorMode": "code", - "expr": "histogram_quantile(0.99,sum(increase(management_grpc_updatechannel_queue_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))", + "expr": "histogram_quantile(0.99,sum(increase(management_grpc_updatechannel_queue_length_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))", "format": "heatmap", "fullMetaSearch": false, "hide": false, @@ -841,7 +841,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "histogram_quantile(0.50, sum(rate(management_store_persistence_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))", + 
"expr": "histogram_quantile(0.50, sum(rate(management_store_persistence_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))", "instant": false, "legendFormat": "p50", "range": true, @@ -853,7 +853,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "histogram_quantile(0.90, sum(rate(management_store_persistence_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))", + "expr": "histogram_quantile(0.90, sum(rate(management_store_persistence_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))", "hide": false, "instant": false, "legendFormat": "p90", @@ -866,7 +866,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "histogram_quantile(0.99, sum(rate(management_store_persistence_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))", + "expr": "histogram_quantile(0.99, sum(rate(management_store_persistence_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))", "hide": false, "instant": false, "legendFormat": "p99", @@ -963,7 +963,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "histogram_quantile(0.50, sum(rate(management_store_transaction_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))", + "expr": "histogram_quantile(0.50, sum(rate(management_store_transaction_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))", "instant": false, "legendFormat": "p50", "range": true, @@ -975,7 +975,7 @@ "uid": "${datasource}" }, "editorMode": "code", - 
"expr": "histogram_quantile(0.90, sum(rate(management_store_transaction_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))", + "expr": "histogram_quantile(0.90, sum(rate(management_store_transaction_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))", "hide": false, "instant": false, "legendFormat": "p90", @@ -988,7 +988,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "histogram_quantile(0.99, sum(rate(management_store_transaction_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))", + "expr": "histogram_quantile(0.99, sum(rate(management_store_transaction_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))", "hide": false, "instant": false, "legendFormat": "p99", @@ -1085,7 +1085,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "histogram_quantile(0.50, sum(rate(management_store_global_lock_acquisition_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))", + "expr": "histogram_quantile(0.50, sum(rate(management_store_global_lock_acquisition_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))", "instant": false, "legendFormat": "p50", "range": true, @@ -1097,7 +1097,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "histogram_quantile(0.90, sum(rate(management_store_global_lock_acquisition_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))", + "expr": "histogram_quantile(0.90, 
sum(rate(management_store_global_lock_acquisition_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))", "hide": false, "instant": false, "legendFormat": "p90", @@ -1110,7 +1110,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "histogram_quantile(0.99, sum(rate(management_store_global_lock_acquisition_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))", + "expr": "histogram_quantile(0.99, sum(rate(management_store_global_lock_acquisition_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))", "hide": false, "instant": false, "legendFormat": "p99", @@ -1221,7 +1221,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "rate(management_idp_authenticate_request_counter_ratio_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])", + "expr": "rate(management_idp_authenticate_request_counter_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])", "instant": false, "legendFormat": "{{cluster}}/{{environment}}/{{job}}", "range": true, @@ -1317,7 +1317,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "rate(management_idp_get_account_counter_ratio_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])", + "expr": "rate(management_idp_get_account_counter_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])", "instant": false, "legendFormat": "{{cluster}}/{{environment}}/{{job}}", "range": true, @@ -1413,7 +1413,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": 
"rate(management_idp_update_user_meta_counter_ratio_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])", + "expr": "rate(management_idp_update_user_meta_counter_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])", "instant": false, "legendFormat": "{{cluster}}/{{environment}}/{{job}}", "range": true, @@ -1523,7 +1523,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "sum(rate(management_http_request_counter_ratio_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",method=~\"GET|OPTIONS\"}[$__rate_interval])) by (job,method)", + "expr": "sum(rate(management_http_request_counter_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",method=~\"GET|OPTIONS\"}[$__rate_interval])) by (job,method)", "instant": false, "legendFormat": "{{method}}", "range": true, @@ -1619,7 +1619,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "sum(rate(management_http_request_counter_ratio_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",method=~\"POST|PUT|DELETE\"}[$__rate_interval])) by (job,method)", + "expr": "sum(rate(management_http_request_counter_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",method=~\"POST|PUT|DELETE\"}[$__rate_interval])) by (job,method)", "instant": false, "legendFormat": "{{method}}", "range": true, @@ -1715,7 +1715,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "histogram_quantile(0.50, sum(rate(management_http_request_duration_ms_total_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",type=~\"read\"}[5m])) by (le))", + "expr": "histogram_quantile(0.50, 
sum(rate(management_http_request_duration_ms_total_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",type=~\"read\"}[5m])) by (le))", "instant": false, "legendFormat": "p50", "range": true, @@ -1727,7 +1727,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "histogram_quantile(0.90, sum(rate(management_http_request_duration_ms_total_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",type=~\"read\"}[5m])) by (le))", + "expr": "histogram_quantile(0.90, sum(rate(management_http_request_duration_ms_total_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",type=~\"read\"}[5m])) by (le))", "hide": false, "instant": false, "legendFormat": "p90", @@ -1740,7 +1740,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "histogram_quantile(0.99, sum(rate(management_http_request_duration_ms_total_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",type=~\"read\"}[5m])) by (le))", + "expr": "histogram_quantile(0.99, sum(rate(management_http_request_duration_ms_total_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",type=~\"read\"}[5m])) by (le))", "hide": false, "instant": false, "legendFormat": "p99", @@ -1837,7 +1837,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "histogram_quantile(0.50, sum(rate(management_http_request_duration_ms_total_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",type=~\"write\"}[5m])) by (le))", + "expr": "histogram_quantile(0.50, sum(rate(management_http_request_duration_ms_total_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",type=~\"write\"}[5m])) by (le))", "instant": false, "legendFormat": "p50", "range": true, @@ -1849,7 +1849,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": 
"histogram_quantile(0.90, sum(rate(management_http_request_duration_ms_total_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",type=~\"write\"}[5m])) by (le))", + "expr": "histogram_quantile(0.90, sum(rate(management_http_request_duration_ms_total_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",type=~\"write\"}[5m])) by (le))", "hide": false, "instant": false, "legendFormat": "p90", @@ -1862,7 +1862,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "histogram_quantile(0.99, sum(rate(management_http_request_duration_ms_total_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",type=~\"write\"}[5m])) by (le))", + "expr": "histogram_quantile(0.99, sum(rate(management_http_request_duration_ms_total_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",type=~\"write\"}[5m])) by (le))", "hide": false, "instant": false, "legendFormat": "p99", @@ -1963,7 +1963,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "sum(rate(management_http_request_counter_ratio_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (job,exported_endpoint,method)", + "expr": "sum(rate(management_http_request_counter_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (job,exported_endpoint,method)", "hide": false, "instant": false, "legendFormat": "{{method}}-{{exported_endpoint}}", @@ -3222,7 +3222,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "sum by(le) (increase(management_grpc_updatechannel_queue_bucket{application=\"management\", environment=\"$environment\", host=~\"$host\"}[$__rate_interval]))", + "expr": "sum by(le) (increase(management_grpc_updatechannel_queue_length_bucket{application=\"management\", environment=\"$environment\", 
host=~\"$host\"}[$__rate_interval]))", "format": "heatmap", "fullMetaSearch": false, "includeNullMetadata": true, @@ -3323,7 +3323,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "sum by(le) (increase(management_account_update_account_peers_duration_ms_bucket{application=\"management\", environment=\"$environment\", host=~\"$host\"}[$__rate_interval]))", + "expr": "sum by(le) (increase(management_account_update_account_peers_duration_ms_milliseconds_bucket{application=\"management\", environment=\"$environment\", host=~\"$host\"}[$__rate_interval]))", "format": "heatmap", "fullMetaSearch": false, "includeNullMetadata": true, diff --git a/management/internals/modules/peers/manager.go b/management/internals/modules/peers/manager.go index 7cb0f3908..d3f8f44ff 100644 --- a/management/internals/modules/peers/manager.go +++ b/management/internals/modules/peers/manager.go @@ -154,9 +154,11 @@ func (m *managerImpl) DeletePeers(ctx context.Context, accountID string, peerIDs return err } - eventsToStore = append(eventsToStore, func() { - m.accountManager.StoreEvent(ctx, userID, peer.ID, accountID, activity.PeerRemovedByUser, peer.EventMeta(dnsDomain)) - }) + if !(peer.ProxyMeta.Embedded || peer.Meta.KernelVersion == "wasm") { + eventsToStore = append(eventsToStore, func() { + m.accountManager.StoreEvent(ctx, userID, peer.ID, accountID, activity.PeerRemovedByUser, peer.EventMeta(dnsDomain)) + }) + } return nil }) diff --git a/management/internals/modules/reverseproxy/accesslogs/manager/manager.go b/management/internals/modules/reverseproxy/accesslogs/manager/manager.go index e8d0ce763..59d7704eb 100644 --- a/management/internals/modules/reverseproxy/accesslogs/manager/manager.go +++ b/management/internals/modules/reverseproxy/accesslogs/manager/manager.go @@ -106,13 +106,23 @@ func (m *managerImpl) CleanupOldAccessLogs(ctx context.Context, retentionDays in // StartPeriodicCleanup starts a background goroutine that periodically cleans up old access logs 
func (m *managerImpl) StartPeriodicCleanup(ctx context.Context, retentionDays, cleanupIntervalHours int) { - if retentionDays <= 0 { - log.WithContext(ctx).Debug("periodic access log cleanup disabled: retention days is 0 or negative") + if retentionDays < 0 { + log.WithContext(ctx).Debug("periodic access log cleanup disabled: retention days is negative") return } + if retentionDays == 0 { + retentionDays = 7 + log.WithContext(ctx).Debugf("no retention days specified for access log cleanup, defaulting to %d days", retentionDays) + } else { + log.WithContext(ctx).Debugf("access log retention period set to %d days", retentionDays) + } + if cleanupIntervalHours <= 0 { cleanupIntervalHours = 24 + log.WithContext(ctx).Debugf("no cleanup interval specified for access log cleanup, defaulting to %d hours", cleanupIntervalHours) + } else { + log.WithContext(ctx).Debugf("access log cleanup interval set to %d hours", cleanupIntervalHours) } cleanupCtx, cancel := context.WithCancel(ctx) diff --git a/management/internals/modules/reverseproxy/accesslogs/manager/manager_test.go b/management/internals/modules/reverseproxy/accesslogs/manager/manager_test.go index 8fadef85f..11bf60829 100644 --- a/management/internals/modules/reverseproxy/accesslogs/manager/manager_test.go +++ b/management/internals/modules/reverseproxy/accesslogs/manager/manager_test.go @@ -121,7 +121,7 @@ func TestCleanupWithExactBoundary(t *testing.T) { } func TestStartPeriodicCleanup(t *testing.T) { - t.Run("periodic cleanup disabled with zero retention", func(t *testing.T) { + t.Run("periodic cleanup disabled with negative retention", func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() @@ -135,7 +135,7 @@ func TestStartPeriodicCleanup(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - manager.StartPeriodicCleanup(ctx, 0, 1) + manager.StartPeriodicCleanup(ctx, -1, 1) time.Sleep(100 * time.Millisecond) diff --git 
a/management/internals/modules/reverseproxy/domain/domain.go b/management/internals/modules/reverseproxy/domain/domain.go index 859f1c5b2..ae13bffae 100644 --- a/management/internals/modules/reverseproxy/domain/domain.go +++ b/management/internals/modules/reverseproxy/domain/domain.go @@ -30,3 +30,8 @@ func (d *Domain) EventMeta() map[string]any { "validated": d.Validated, } } + +func (d *Domain) Copy() *Domain { + dCopy := *d + return &dCopy +} diff --git a/management/internals/modules/reverseproxy/domain/manager/manager.go b/management/internals/modules/reverseproxy/domain/manager/manager.go index f18df3d01..df2c27c4f 100644 --- a/management/internals/modules/reverseproxy/domain/manager/manager.go +++ b/management/internals/modules/reverseproxy/domain/manager/manager.go @@ -32,19 +32,15 @@ type store interface { type proxyManager interface { GetActiveClusterAddresses(ctx context.Context) ([]string, error) GetActiveClusterAddressesForAccount(ctx context.Context, accountID string) ([]string, error) -} - -type clusterCapabilities interface { - ClusterSupportsCustomPorts(clusterAddr string) *bool - ClusterRequireSubdomain(clusterAddr string) *bool + ClusterSupportsCustomPorts(ctx context.Context, clusterAddr string) *bool + ClusterRequireSubdomain(ctx context.Context, clusterAddr string) *bool } type Manager struct { - store store - validator domain.Validator - proxyManager proxyManager - clusterCapabilities clusterCapabilities - permissionsManager permissions.Manager + store store + validator domain.Validator + proxyManager proxyManager + permissionsManager permissions.Manager accountManager account.Manager } @@ -58,11 +54,6 @@ func NewManager(store store, proxyMgr proxyManager, permissionsManager permissio } } -// SetClusterCapabilities sets the cluster capabilities provider for domain queries. 
-func (m *Manager) SetClusterCapabilities(caps clusterCapabilities) { - m.clusterCapabilities = caps -} - func (m Manager) GetDomains(ctx context.Context, accountID, userID string) ([]*domain.Domain, error) { ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Services, operations.Read) if err != nil { @@ -98,10 +89,8 @@ func (m Manager) GetDomains(ctx context.Context, accountID, userID string) ([]*d Type: domain.TypeFree, Validated: true, } - if m.clusterCapabilities != nil { - d.SupportsCustomPorts = m.clusterCapabilities.ClusterSupportsCustomPorts(cluster) - d.RequireSubdomain = m.clusterCapabilities.ClusterRequireSubdomain(cluster) - } + d.SupportsCustomPorts = m.proxyManager.ClusterSupportsCustomPorts(ctx, cluster) + d.RequireSubdomain = m.proxyManager.ClusterRequireSubdomain(ctx, cluster) ret = append(ret, d) } @@ -115,8 +104,8 @@ func (m Manager) GetDomains(ctx context.Context, accountID, userID string) ([]*d Type: domain.TypeCustom, Validated: d.Validated, } - if m.clusterCapabilities != nil && d.TargetCluster != "" { - cd.SupportsCustomPorts = m.clusterCapabilities.ClusterSupportsCustomPorts(d.TargetCluster) + if d.TargetCluster != "" { + cd.SupportsCustomPorts = m.proxyManager.ClusterSupportsCustomPorts(ctx, d.TargetCluster) } // Custom domains never require a subdomain by default since // the account owns them and should be able to use the bare domain. 
diff --git a/management/internals/modules/reverseproxy/domain/manager/manager_test.go b/management/internals/modules/reverseproxy/domain/manager/manager_test.go index a080081a9..25c001ee9 100644 --- a/management/internals/modules/reverseproxy/domain/manager/manager_test.go +++ b/management/internals/modules/reverseproxy/domain/manager/manager_test.go @@ -28,6 +28,14 @@ func (m *mockProxyManager) GetActiveClusterAddressesForAccount(ctx context.Conte return nil, nil } +func (m *mockProxyManager) ClusterSupportsCustomPorts(_ context.Context, _ string) *bool { + return nil +} + +func (m *mockProxyManager) ClusterRequireSubdomain(_ context.Context, _ string) *bool { + return nil +} + func TestGetClusterAllowList_BYOPProxy(t *testing.T) { pm := &mockProxyManager{ getActiveClusterAddressesForAccountFunc: func(_ context.Context, accID string) ([]string, error) { diff --git a/management/internals/modules/reverseproxy/proxy/manager.go b/management/internals/modules/reverseproxy/proxy/manager.go index 7bd44bc0e..86746e1ec 100644 --- a/management/internals/modules/reverseproxy/proxy/manager.go +++ b/management/internals/modules/reverseproxy/proxy/manager.go @@ -11,12 +11,14 @@ import ( // Manager defines the interface for proxy operations type Manager interface { - Connect(ctx context.Context, proxyID, clusterAddress, ipAddress string, accountID *string) error + Connect(ctx context.Context, proxyID, clusterAddress, ipAddress string, accountID *string, capabilities *Capabilities) error Disconnect(ctx context.Context, proxyID string) error Heartbeat(ctx context.Context, proxyID, clusterAddress, ipAddress string) error GetActiveClusterAddresses(ctx context.Context) ([]string, error) GetActiveClusterAddressesForAccount(ctx context.Context, accountID string) ([]string, error) GetActiveClusters(ctx context.Context) ([]Cluster, error) + ClusterSupportsCustomPorts(ctx context.Context, clusterAddr string) *bool + ClusterRequireSubdomain(ctx context.Context, clusterAddr string) *bool 
CleanupStale(ctx context.Context, inactivityDuration time.Duration) error GetAccountProxy(ctx context.Context, accountID string) (*Proxy, error) CountAccountProxies(ctx context.Context, accountID string) (int64, error) @@ -39,6 +41,4 @@ type Controller interface { RegisterProxyToCluster(ctx context.Context, clusterAddr, proxyID string) error UnregisterProxyFromCluster(ctx context.Context, clusterAddr, proxyID string) error GetProxiesForCluster(clusterAddr string) []string - ClusterSupportsCustomPorts(clusterAddr string) *bool - ClusterRequireSubdomain(clusterAddr string) *bool } diff --git a/management/internals/modules/reverseproxy/proxy/manager/controller.go b/management/internals/modules/reverseproxy/proxy/manager/controller.go index 05a0c9048..e5b3e9886 100644 --- a/management/internals/modules/reverseproxy/proxy/manager/controller.go +++ b/management/internals/modules/reverseproxy/proxy/manager/controller.go @@ -72,17 +72,6 @@ func (c *GRPCController) UnregisterProxyFromCluster(ctx context.Context, cluster return nil } -// ClusterSupportsCustomPorts returns whether any proxy in the cluster supports custom ports. -func (c *GRPCController) ClusterSupportsCustomPorts(clusterAddr string) *bool { - return c.proxyGRPCServer.ClusterSupportsCustomPorts(clusterAddr) -} - -// ClusterRequireSubdomain returns whether the cluster requires a subdomain label. -// Returns nil when no proxy has reported the capability (defaults to false). -func (c *GRPCController) ClusterRequireSubdomain(clusterAddr string) *bool { - return c.proxyGRPCServer.ClusterRequireSubdomain(clusterAddr) -} - // GetProxiesForCluster returns all proxy IDs registered for a specific cluster. 
func (c *GRPCController) GetProxiesForCluster(clusterAddr string) []string { proxySet, ok := c.clusterProxies.Load(clusterAddr) diff --git a/management/internals/modules/reverseproxy/proxy/manager/manager.go b/management/internals/modules/reverseproxy/proxy/manager/manager.go index bc4d7b800..50b3ee692 100644 --- a/management/internals/modules/reverseproxy/proxy/manager/manager.go +++ b/management/internals/modules/reverseproxy/proxy/manager/manager.go @@ -18,6 +18,8 @@ type store interface { GetActiveProxyClusterAddresses(ctx context.Context) ([]string, error) GetActiveProxyClusterAddressesForAccount(ctx context.Context, accountID string) ([]string, error) GetActiveProxyClusters(ctx context.Context) ([]proxy.Cluster, error) + GetClusterSupportsCustomPorts(ctx context.Context, clusterAddr string) *bool + GetClusterRequireSubdomain(ctx context.Context, clusterAddr string) *bool CleanupStaleProxies(ctx context.Context, inactivityDuration time.Duration) error GetProxyByAccountID(ctx context.Context, accountID string) (*proxy.Proxy, error) CountProxiesByAccountID(ctx context.Context, accountID string) (int64, error) @@ -44,9 +46,14 @@ func NewManager(store store, meter metric.Meter) (*Manager, error) { }, nil } -// Connect registers a new proxy connection in the database -func (m *Manager) Connect(ctx context.Context, proxyID, clusterAddress, ipAddress string, accountID *string) error { +// Connect registers a new proxy connection in the database. +// capabilities may be nil for old proxies that do not report them. 
+func (m *Manager) Connect(ctx context.Context, proxyID, clusterAddress, ipAddress string, accountID *string, capabilities *proxy.Capabilities) error { now := time.Now() + var caps proxy.Capabilities + if capabilities != nil { + caps = *capabilities + } p := &proxy.Proxy{ ID: proxyID, ClusterAddress: clusterAddress, @@ -55,6 +62,7 @@ func (m *Manager) Connect(ctx context.Context, proxyID, clusterAddress, ipAddres LastSeen: now, ConnectedAt: &now, Status: proxy.StatusConnected, + Capabilities: caps, } if err := m.store.SaveProxy(ctx, p); err != nil { @@ -117,6 +125,18 @@ func (m Manager) GetActiveClusters(ctx context.Context) ([]proxy.Cluster, error) return clusters, nil } +// ClusterSupportsCustomPorts returns whether any active proxy in the cluster +// supports custom ports. Returns nil when no proxy has reported capabilities. +func (m Manager) ClusterSupportsCustomPorts(ctx context.Context, clusterAddr string) *bool { + return m.store.GetClusterSupportsCustomPorts(ctx, clusterAddr) +} + +// ClusterRequireSubdomain returns whether any active proxy in the cluster +// requires a subdomain. Returns nil when no proxy has reported capabilities. 
+func (m Manager) ClusterRequireSubdomain(ctx context.Context, clusterAddr string) *bool { + return m.store.GetClusterRequireSubdomain(ctx, clusterAddr) +} + // CleanupStale removes proxies that haven't sent heartbeat in the specified duration func (m *Manager) CleanupStale(ctx context.Context, inactivityDuration time.Duration) error { if err := m.store.CleanupStaleProxies(ctx, inactivityDuration); err != nil { diff --git a/management/internals/modules/reverseproxy/proxy/manager/manager_test.go b/management/internals/modules/reverseproxy/proxy/manager/manager_test.go index 5baee4e68..65a09a6f4 100644 --- a/management/internals/modules/reverseproxy/proxy/manager/manager_test.go +++ b/management/internals/modules/reverseproxy/proxy/manager/manager_test.go @@ -90,6 +90,12 @@ func (m *mockStore) DeleteProxy(ctx context.Context, proxyID string) error { } return nil } +func (m *mockStore) GetClusterSupportsCustomPorts(_ context.Context, _ string) *bool { + return nil +} +func (m *mockStore) GetClusterRequireSubdomain(_ context.Context, _ string) *bool { + return nil +} func newTestManager(s store) *Manager { meter := noop.NewMeterProvider().Meter("test") @@ -112,7 +118,7 @@ func TestConnect_WithAccountID(t *testing.T) { } mgr := newTestManager(s) - err := mgr.Connect(context.Background(), "proxy-1", "cluster.example.com", "10.0.0.1", &accountID) + err := mgr.Connect(context.Background(), "proxy-1", "cluster.example.com", "10.0.0.1", &accountID, nil) require.NoError(t, err) require.NotNil(t, savedProxy) @@ -134,7 +140,7 @@ func TestConnect_WithoutAccountID(t *testing.T) { } mgr := newTestManager(s) - err := mgr.Connect(context.Background(), "proxy-1", "eu.proxy.netbird.io", "10.0.0.1", nil) + err := mgr.Connect(context.Background(), "proxy-1", "eu.proxy.netbird.io", "10.0.0.1", nil, nil) require.NoError(t, err) require.NotNil(t, savedProxy) @@ -150,7 +156,7 @@ func TestConnect_StoreError(t *testing.T) { } mgr := newTestManager(s) - err := mgr.Connect(context.Background(), 
"proxy-1", "cluster.example.com", "10.0.0.1", nil) + err := mgr.Connect(context.Background(), "proxy-1", "cluster.example.com", "10.0.0.1", nil, nil) assert.Error(t, err) } diff --git a/management/internals/modules/reverseproxy/proxy/manager_mock.go b/management/internals/modules/reverseproxy/proxy/manager_mock.go index 5c9260741..64156f891 100644 --- a/management/internals/modules/reverseproxy/proxy/manager_mock.go +++ b/management/internals/modules/reverseproxy/proxy/manager_mock.go @@ -50,18 +50,46 @@ func (mr *MockManagerMockRecorder) CleanupStale(ctx, inactivityDuration interfac return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupStale", reflect.TypeOf((*MockManager)(nil).CleanupStale), ctx, inactivityDuration) } -// Connect mocks base method. -func (m *MockManager) Connect(ctx context.Context, proxyID, clusterAddress, ipAddress string, accountID *string) error { +// ClusterSupportsCustomPorts mocks base method. +func (m *MockManager) ClusterSupportsCustomPorts(ctx context.Context, clusterAddr string) *bool { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Connect", ctx, proxyID, clusterAddress, ipAddress, accountID) + ret := m.ctrl.Call(m, "ClusterSupportsCustomPorts", ctx, clusterAddr) + ret0, _ := ret[0].(*bool) + return ret0 +} + +// ClusterSupportsCustomPorts indicates an expected call of ClusterSupportsCustomPorts. +func (mr *MockManagerMockRecorder) ClusterSupportsCustomPorts(ctx, clusterAddr interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterSupportsCustomPorts", reflect.TypeOf((*MockManager)(nil).ClusterSupportsCustomPorts), ctx, clusterAddr) +} + +// ClusterRequireSubdomain mocks base method. 
+func (m *MockManager) ClusterRequireSubdomain(ctx context.Context, clusterAddr string) *bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClusterRequireSubdomain", ctx, clusterAddr) + ret0, _ := ret[0].(*bool) + return ret0 +} + +// ClusterRequireSubdomain indicates an expected call of ClusterRequireSubdomain. +func (mr *MockManagerMockRecorder) ClusterRequireSubdomain(ctx, clusterAddr interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterRequireSubdomain", reflect.TypeOf((*MockManager)(nil).ClusterRequireSubdomain), ctx, clusterAddr) +} + +// Connect mocks base method. +func (m *MockManager) Connect(ctx context.Context, proxyID, clusterAddress, ipAddress string, accountID *string, capabilities *Capabilities) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Connect", ctx, proxyID, clusterAddress, ipAddress, accountID, capabilities) ret0, _ := ret[0].(error) return ret0 } // Connect indicates an expected call of Connect. -func (mr *MockManagerMockRecorder) Connect(ctx, proxyID, clusterAddress, ipAddress, accountID interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) Connect(ctx, proxyID, clusterAddress, ipAddress, accountID, capabilities interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Connect", reflect.TypeOf((*MockManager)(nil).Connect), ctx, proxyID, clusterAddress, ipAddress, accountID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Connect", reflect.TypeOf((*MockManager)(nil).Connect), ctx, proxyID, clusterAddress, ipAddress, accountID, capabilities) } // Disconnect mocks base method. @@ -216,34 +244,6 @@ func (m *MockController) EXPECT() *MockControllerMockRecorder { return m.recorder } -// ClusterSupportsCustomPorts mocks base method. 
-func (m *MockController) ClusterSupportsCustomPorts(clusterAddr string) *bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClusterSupportsCustomPorts", clusterAddr) - ret0, _ := ret[0].(*bool) - return ret0 -} - -// ClusterSupportsCustomPorts indicates an expected call of ClusterSupportsCustomPorts. -func (mr *MockControllerMockRecorder) ClusterSupportsCustomPorts(clusterAddr interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterSupportsCustomPorts", reflect.TypeOf((*MockController)(nil).ClusterSupportsCustomPorts), clusterAddr) -} - -// ClusterRequireSubdomain mocks base method. -func (m *MockController) ClusterRequireSubdomain(clusterAddr string) *bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClusterRequireSubdomain", clusterAddr) - ret0, _ := ret[0].(*bool) - return ret0 -} - -// ClusterRequireSubdomain indicates an expected call of ClusterRequireSubdomain. -func (mr *MockControllerMockRecorder) ClusterRequireSubdomain(clusterAddr interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterRequireSubdomain", reflect.TypeOf((*MockController)(nil).ClusterRequireSubdomain), clusterAddr) -} - // GetOIDCValidationConfig mocks base method. func (m *MockController) GetOIDCValidationConfig() OIDCValidationConfig { m.ctrl.T.Helper() diff --git a/management/internals/modules/reverseproxy/proxy/proxy.go b/management/internals/modules/reverseproxy/proxy/proxy.go index 32e48c510..4763e67ef 100644 --- a/management/internals/modules/reverseproxy/proxy/proxy.go +++ b/management/internals/modules/reverseproxy/proxy/proxy.go @@ -12,6 +12,17 @@ const ( StatusDisconnected = "disconnected" ) +// Capabilities describes what a proxy can handle, as reported via gRPC. +// Nil fields mean the proxy never reported this capability. 
+type Capabilities struct { + // SupportsCustomPorts indicates whether this proxy can bind arbitrary + // ports for TCP/UDP services. TLS uses SNI routing and is not gated. + SupportsCustomPorts *bool + // RequireSubdomain indicates whether a subdomain label is required in + // front of the cluster domain. + RequireSubdomain *bool +} + // Proxy represents a reverse proxy instance type Proxy struct { ID string `gorm:"primaryKey;type:varchar(255)"` @@ -22,6 +33,7 @@ type Proxy struct { ConnectedAt *time.Time DisconnectedAt *time.Time Status string `gorm:"type:varchar(20);not null;index:idx_proxy_cluster_status"` + Capabilities Capabilities `gorm:"embedded"` CreatedAt time.Time UpdatedAt time.Time } diff --git a/management/internals/modules/reverseproxy/service/manager/l4_port_test.go b/management/internals/modules/reverseproxy/service/manager/l4_port_test.go index 8b652c7e1..47dce3a64 100644 --- a/management/internals/modules/reverseproxy/service/manager/l4_port_test.go +++ b/management/internals/modules/reverseproxy/service/manager/l4_port_test.go @@ -75,16 +75,18 @@ func setupL4Test(t *testing.T, customPortsSupported *bool) (*Manager, store.Stor require.NoError(t, err) mockCtrl := proxy.NewMockController(ctrl) - mockCtrl.EXPECT().ClusterSupportsCustomPorts(gomock.Any()).Return(customPortsSupported).AnyTimes() - mockCtrl.EXPECT().ClusterRequireSubdomain(gomock.Any()).Return((*bool)(nil)).AnyTimes() mockCtrl.EXPECT().SendServiceUpdateToCluster(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() mockCtrl.EXPECT().GetOIDCValidationConfig().Return(proxy.OIDCValidationConfig{}).AnyTimes() + mockCaps := proxy.NewMockManager(ctrl) + mockCaps.EXPECT().ClusterSupportsCustomPorts(gomock.Any(), testCluster).Return(customPortsSupported).AnyTimes() + mockCaps.EXPECT().ClusterRequireSubdomain(gomock.Any(), testCluster).Return((*bool)(nil)).AnyTimes() + accountMgr := &mock_server.MockAccountManager{ StoreEventFunc: func(_ context.Context, _, _, _ string, _ 
activity.ActivityDescriber, _ map[string]any) {}, UpdateAccountPeersFunc: func(_ context.Context, _ string) {}, - GetGroupByNameFunc: func(ctx context.Context, accountID, groupName string) (*types.Group, error) { - return testStore.GetGroupByName(ctx, store.LockingStrengthNone, groupName, accountID) + GetGroupByNameFunc: func(ctx context.Context, groupName, accountID, userID string) (*types.Group, error) { + return testStore.GetGroupByName(ctx, store.LockingStrengthNone, accountID, groupName) }, } @@ -93,6 +95,7 @@ func setupL4Test(t *testing.T, customPortsSupported *bool) (*Manager, store.Stor accountManager: accountMgr, permissionsManager: permissions.NewManager(testStore), proxyController: mockCtrl, + capabilities: mockCaps, clusterDeriver: &testClusterDeriver{domains: []string{"test.netbird.io"}}, } mgr.exposeReaper = &exposeReaper{manager: mgr} diff --git a/management/internals/modules/reverseproxy/service/manager/manager.go b/management/internals/modules/reverseproxy/service/manager/manager.go index a64955fc9..2083d1a86 100644 --- a/management/internals/modules/reverseproxy/service/manager/manager.go +++ b/management/internals/modules/reverseproxy/service/manager/manager.go @@ -75,22 +75,30 @@ type ClusterDeriver interface { GetClusterDomains() []string } +// CapabilityProvider queries proxy cluster capabilities from the database. +type CapabilityProvider interface { + ClusterSupportsCustomPorts(ctx context.Context, clusterAddr string) *bool + ClusterRequireSubdomain(ctx context.Context, clusterAddr string) *bool +} + type Manager struct { store store.Store accountManager account.Manager permissionsManager permissions.Manager proxyController proxy.Controller + capabilities CapabilityProvider clusterDeriver ClusterDeriver exposeReaper *exposeReaper } // NewManager creates a new service manager. 
-func NewManager(store store.Store, accountManager account.Manager, permissionsManager permissions.Manager, proxyController proxy.Controller, clusterDeriver ClusterDeriver) *Manager { +func NewManager(store store.Store, accountManager account.Manager, permissionsManager permissions.Manager, proxyController proxy.Controller, capabilities CapabilityProvider, clusterDeriver ClusterDeriver) *Manager { mgr := &Manager{ store: store, accountManager: accountManager, permissionsManager: permissionsManager, proxyController: proxyController, + capabilities: capabilities, clusterDeriver: clusterDeriver, } mgr.exposeReaper = &exposeReaper{manager: mgr} @@ -237,7 +245,7 @@ func (m *Manager) initializeServiceForCreate(ctx context.Context, accountID stri } service.ProxyCluster = proxyCluster - if err := m.validateSubdomainRequirement(service.Domain, proxyCluster); err != nil { + if err := m.validateSubdomainRequirement(ctx, service.Domain, proxyCluster); err != nil { return err } } @@ -268,11 +276,11 @@ func (m *Manager) initializeServiceForCreate(ctx context.Context, accountID stri // validateSubdomainRequirement checks whether the domain can be used bare // (without a subdomain label) on the given cluster. If the cluster reports // require_subdomain=true and the domain equals the cluster domain, it rejects. 
-func (m *Manager) validateSubdomainRequirement(domain, cluster string) error { +func (m *Manager) validateSubdomainRequirement(ctx context.Context, domain, cluster string) error { if domain != cluster { return nil } - requireSub := m.proxyController.ClusterRequireSubdomain(cluster) + requireSub := m.capabilities.ClusterRequireSubdomain(ctx, cluster) if requireSub != nil && *requireSub { return status.Errorf(status.InvalidArgument, "domain %s requires a subdomain label", domain) } @@ -280,6 +288,8 @@ func (m *Manager) validateSubdomainRequirement(domain, cluster string) error { } func (m *Manager) persistNewService(ctx context.Context, accountID string, svc *service.Service) error { + customPorts := m.clusterCustomPorts(ctx, svc) + return m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { if svc.Domain != "" { if err := m.checkDomainAvailable(ctx, transaction, svc.Domain, ""); err != nil { @@ -287,7 +297,7 @@ func (m *Manager) persistNewService(ctx context.Context, accountID string, svc * } } - if err := m.ensureL4Port(ctx, transaction, svc); err != nil { + if err := m.ensureL4Port(ctx, transaction, svc, customPorts); err != nil { return err } @@ -307,12 +317,23 @@ func (m *Manager) persistNewService(ctx context.Context, accountID string, svc * }) } -// ensureL4Port auto-assigns a listen port when needed and validates cluster support. -func (m *Manager) ensureL4Port(ctx context.Context, tx store.Store, svc *service.Service) error { +// clusterCustomPorts queries whether the cluster supports custom ports. +// Must be called before entering a transaction: the underlying query uses +// the main DB handle, which deadlocks when called inside a transaction +// that already holds the connection. 
+func (m *Manager) clusterCustomPorts(ctx context.Context, svc *service.Service) *bool { + if !service.IsL4Protocol(svc.Mode) { + return nil + } + return m.capabilities.ClusterSupportsCustomPorts(ctx, svc.ProxyCluster) +} + +// ensureL4Port auto-assigns a listen port when needed and validates cluster support. +// customPorts must be pre-computed via clusterCustomPorts before entering a transaction. +func (m *Manager) ensureL4Port(ctx context.Context, tx store.Store, svc *service.Service, customPorts *bool) error { if !service.IsL4Protocol(svc.Mode) { return nil } - customPorts := m.proxyController.ClusterSupportsCustomPorts(svc.ProxyCluster) if service.IsPortBasedProtocol(svc.Mode) && svc.ListenPort > 0 && (customPorts == nil || !*customPorts) { if svc.Source != service.SourceEphemeral { return status.Errorf(status.InvalidArgument, "custom ports not supported on cluster %s", svc.ProxyCluster) @@ -396,12 +417,14 @@ func (m *Manager) assignPort(ctx context.Context, tx store.Store, cluster string // The count and exists queries use FOR UPDATE locking to serialize concurrent creates // for the same peer, preventing the per-peer limit from being bypassed. 
func (m *Manager) persistNewEphemeralService(ctx context.Context, accountID, peerID string, svc *service.Service) error { + customPorts := m.clusterCustomPorts(ctx, svc) + return m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { if err := m.validateEphemeralPreconditions(ctx, transaction, accountID, peerID, svc); err != nil { return err } - if err := m.ensureL4Port(ctx, transaction, svc); err != nil { + if err := m.ensureL4Port(ctx, transaction, svc, customPorts); err != nil { return err } @@ -504,21 +527,58 @@ type serviceUpdateInfo struct { } func (m *Manager) persistServiceUpdate(ctx context.Context, accountID string, service *service.Service) (*serviceUpdateInfo, error) { + effectiveCluster, err := m.resolveEffectiveCluster(ctx, accountID, service) + if err != nil { + return nil, err + } + + svcForCaps := *service + svcForCaps.ProxyCluster = effectiveCluster + customPorts := m.clusterCustomPorts(ctx, &svcForCaps) + var updateInfo serviceUpdateInfo - err := m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { - return m.executeServiceUpdate(ctx, transaction, accountID, service, &updateInfo) + err = m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error { + return m.executeServiceUpdate(ctx, transaction, accountID, service, &updateInfo, customPorts) }) return &updateInfo, err } -func (m *Manager) executeServiceUpdate(ctx context.Context, transaction store.Store, accountID string, service *service.Service, updateInfo *serviceUpdateInfo) error { +// resolveEffectiveCluster determines the cluster that will be used after the update. +// It reads the existing service without locking and derives the new cluster if the domain changed. 
+func (m *Manager) resolveEffectiveCluster(ctx context.Context, accountID string, svc *service.Service) (string, error) { + existing, err := m.store.GetServiceByID(ctx, store.LockingStrengthNone, accountID, svc.ID) + if err != nil { + return "", err + } + + if existing.Domain == svc.Domain { + return existing.ProxyCluster, nil + } + + if m.clusterDeriver != nil { + derived, err := m.clusterDeriver.DeriveClusterFromDomain(ctx, accountID, svc.Domain) + if err != nil { + log.WithError(err).Warnf("could not derive cluster from domain %s", svc.Domain) + } else { + return derived, nil + } + } + + return existing.ProxyCluster, nil +} + +func (m *Manager) executeServiceUpdate(ctx context.Context, transaction store.Store, accountID string, service *service.Service, updateInfo *serviceUpdateInfo, customPorts *bool) error { existingService, err := transaction.GetServiceByID(ctx, store.LockingStrengthUpdate, accountID, service.ID) if err != nil { return err } + if existingService.Terminated { + return status.Errorf(status.PermissionDenied, "service is terminated and cannot be updated") + } + if err := validateProtocolChange(existingService.Mode, service.Mode); err != nil { return err } @@ -534,7 +594,7 @@ func (m *Manager) executeServiceUpdate(ctx context.Context, transaction store.St service.ProxyCluster = existingService.ProxyCluster } - if err := m.validateSubdomainRequirement(service.Domain, service.ProxyCluster); err != nil { + if err := m.validateSubdomainRequirement(ctx, service.Domain, service.ProxyCluster); err != nil { return err } @@ -546,7 +606,7 @@ func (m *Manager) executeServiceUpdate(ctx context.Context, transaction store.St m.preserveListenPort(service, existingService) updateInfo.serviceEnabledChanged = existingService.Enabled != service.Enabled - if err := m.ensureL4Port(ctx, transaction, service); err != nil { + if err := m.ensureL4Port(ctx, transaction, service, customPorts); err != nil { return err } if err := m.checkPortConflict(ctx, transaction, 
service); err != nil { @@ -1063,7 +1123,7 @@ func (m *Manager) getGroupIDsFromNames(ctx context.Context, accountID string, gr } groupIDs := make([]string, 0, len(groupNames)) for _, groupName := range groupNames { - g, err := m.accountManager.GetGroupByName(ctx, groupName, accountID) + g, err := m.accountManager.GetGroupByName(ctx, groupName, accountID, activity.SystemInitiator) if err != nil { return nil, fmt.Errorf("failed to get group by name %s: %w", groupName, err) } diff --git a/management/internals/modules/reverseproxy/service/manager/manager_test.go b/management/internals/modules/reverseproxy/service/manager/manager_test.go index b31f98e47..810c93294 100644 --- a/management/internals/modules/reverseproxy/service/manager/manager_test.go +++ b/management/internals/modules/reverseproxy/service/manager/manager_test.go @@ -698,8 +698,8 @@ func setupIntegrationTest(t *testing.T) (*Manager, store.Store) { accountMgr := &mock_server.MockAccountManager{ StoreEventFunc: func(_ context.Context, _, _, _ string, _ activity.ActivityDescriber, _ map[string]any) {}, UpdateAccountPeersFunc: func(_ context.Context, _ string) {}, - GetGroupByNameFunc: func(ctx context.Context, accountID, groupName string) (*types.Group, error) { - return testStore.GetGroupByName(ctx, store.LockingStrengthNone, groupName, accountID) + GetGroupByNameFunc: func(ctx context.Context, groupName, accountID, userID string) (*types.Group, error) { + return testStore.GetGroupByName(ctx, store.LockingStrengthNone, accountID, groupName) }, } @@ -1324,11 +1324,11 @@ func TestValidateSubdomainRequirement(t *testing.T) { t.Run(tc.name, func(t *testing.T) { ctrl := gomock.NewController(t) - mockCtrl := proxy.NewMockController(ctrl) - mockCtrl.EXPECT().ClusterRequireSubdomain(tc.cluster).Return(tc.requireSubdomain).AnyTimes() + mockCaps := proxy.NewMockManager(ctrl) + mockCaps.EXPECT().ClusterRequireSubdomain(gomock.Any(), tc.cluster).Return(tc.requireSubdomain).AnyTimes() - mgr := &Manager{proxyController: 
mockCtrl} - err := mgr.validateSubdomainRequirement(tc.domain, tc.cluster) + mgr := &Manager{capabilities: mockCaps} + err := mgr.validateSubdomainRequirement(context.Background(), tc.domain, tc.cluster) if tc.wantErr { require.Error(t, err) assert.Contains(t, err.Error(), "requires a subdomain label") diff --git a/management/internals/modules/reverseproxy/service/service.go b/management/internals/modules/reverseproxy/service/service.go index be04777a1..60b36917c 100644 --- a/management/internals/modules/reverseproxy/service/service.go +++ b/management/internals/modules/reverseproxy/service/service.go @@ -184,6 +184,7 @@ type Service struct { ProxyCluster string `gorm:"index"` Targets []*Target `gorm:"foreignKey:ServiceID;constraint:OnDelete:CASCADE"` Enabled bool + Terminated bool PassHostHeader bool RewriteRedirects bool Auth AuthConfig `gorm:"serializer:json"` @@ -256,7 +257,7 @@ func (s *Service) ToAPIResponse() *api.Service { Protocol: api.ServiceTargetProtocol(target.Protocol), TargetId: target.TargetId, TargetType: api.ServiceTargetTargetType(target.TargetType), - Enabled: target.Enabled, + Enabled: target.Enabled && !s.Terminated, } opts := targetOptionsToAPI(target.Options) if opts == nil { @@ -286,7 +287,8 @@ func (s *Service) ToAPIResponse() *api.Service { Name: s.Name, Domain: s.Domain, Targets: apiTargets, - Enabled: s.Enabled, + Enabled: s.Enabled && !s.Terminated, + Terminated: &s.Terminated, PassHostHeader: &s.PassHostHeader, RewriteRedirects: &s.RewriteRedirects, Auth: authConfig, @@ -785,6 +787,11 @@ func (s *Service) validateHTTPTargets() error { } func (s *Service) validateL4Target(target *Target) error { + // L4 services have a single target; per-target disable is meaningless + // (use the service-level Enabled flag instead). Force it on so that + // buildPathMappings always includes the target in the proto. 
+ target.Enabled = true + if target.Port == 0 { return errors.New("target port is required for L4 services") } @@ -850,7 +857,7 @@ func IsPortBasedProtocol(mode string) bool { } const ( - maxCustomHeaders = 16 + maxCustomHeaders = 16 maxHeaderKeyLen = 128 maxHeaderValueLen = 4096 ) @@ -947,7 +954,6 @@ func containsCRLF(s string) bool { } func validateHeaderAuths(headers []*HeaderAuthConfig) error { - seen := make(map[string]struct{}) for i, h := range headers { if h == nil || !h.Enabled { continue @@ -968,10 +974,6 @@ func validateHeaderAuths(headers []*HeaderAuthConfig) error { if canonical == "Host" { return fmt.Errorf("header_auths[%d]: Host header cannot be used for auth", i) } - if _, dup := seen[canonical]; dup { - return fmt.Errorf("header_auths[%d]: duplicate header %q (same canonical form already configured)", i, h.Header) - } - seen[canonical] = struct{}{} if len(h.Value) > maxHeaderValueLen { return fmt.Errorf("header_auths[%d]: value exceeds maximum length of %d", i, maxHeaderValueLen) } @@ -1130,6 +1132,7 @@ func (s *Service) Copy() *Service { ProxyCluster: s.ProxyCluster, Targets: targets, Enabled: s.Enabled, + Terminated: s.Terminated, PassHostHeader: s.PassHostHeader, RewriteRedirects: s.RewriteRedirects, Auth: authCopy, diff --git a/management/internals/modules/reverseproxy/service/service_test.go b/management/internals/modules/reverseproxy/service/service_test.go index 3fe07b1d0..ff54cb79f 100644 --- a/management/internals/modules/reverseproxy/service/service_test.go +++ b/management/internals/modules/reverseproxy/service/service_test.go @@ -935,3 +935,107 @@ func TestExposeServiceRequest_Validate_HTTPAllowsAuth(t *testing.T) { req := ExposeServiceRequest{Port: 8080, Mode: "http", Pin: "123456"} require.NoError(t, req.Validate()) } + +func TestValidate_HeaderAuths(t *testing.T) { + t.Run("single valid header", func(t *testing.T) { + rp := validProxy() + rp.Auth = AuthConfig{ + HeaderAuths: []*HeaderAuthConfig{ + {Enabled: true, Header: 
"X-API-Key", Value: "secret"}, + }, + } + require.NoError(t, rp.Validate()) + }) + + t.Run("multiple headers same canonical name allowed", func(t *testing.T) { + rp := validProxy() + rp.Auth = AuthConfig{ + HeaderAuths: []*HeaderAuthConfig{ + {Enabled: true, Header: "Authorization", Value: "Bearer token-1"}, + {Enabled: true, Header: "Authorization", Value: "Bearer token-2"}, + }, + } + require.NoError(t, rp.Validate()) + }) + + t.Run("multiple headers different case same canonical allowed", func(t *testing.T) { + rp := validProxy() + rp.Auth = AuthConfig{ + HeaderAuths: []*HeaderAuthConfig{ + {Enabled: true, Header: "x-api-key", Value: "key-1"}, + {Enabled: true, Header: "X-Api-Key", Value: "key-2"}, + }, + } + require.NoError(t, rp.Validate()) + }) + + t.Run("multiple different headers allowed", func(t *testing.T) { + rp := validProxy() + rp.Auth = AuthConfig{ + HeaderAuths: []*HeaderAuthConfig{ + {Enabled: true, Header: "Authorization", Value: "Bearer tok"}, + {Enabled: true, Header: "X-API-Key", Value: "key"}, + }, + } + require.NoError(t, rp.Validate()) + }) + + t.Run("empty header name rejected", func(t *testing.T) { + rp := validProxy() + rp.Auth = AuthConfig{ + HeaderAuths: []*HeaderAuthConfig{ + {Enabled: true, Header: "", Value: "val"}, + }, + } + err := rp.Validate() + require.Error(t, err) + assert.Contains(t, err.Error(), "header name is required") + }) + + t.Run("hop-by-hop header rejected", func(t *testing.T) { + rp := validProxy() + rp.Auth = AuthConfig{ + HeaderAuths: []*HeaderAuthConfig{ + {Enabled: true, Header: "Connection", Value: "val"}, + }, + } + err := rp.Validate() + require.Error(t, err) + assert.Contains(t, err.Error(), "hop-by-hop") + }) + + t.Run("host header rejected", func(t *testing.T) { + rp := validProxy() + rp.Auth = AuthConfig{ + HeaderAuths: []*HeaderAuthConfig{ + {Enabled: true, Header: "Host", Value: "val"}, + }, + } + err := rp.Validate() + require.Error(t, err) + assert.Contains(t, err.Error(), "Host header cannot be used") 
+ }) + + t.Run("disabled entries skipped", func(t *testing.T) { + rp := validProxy() + rp.Auth = AuthConfig{ + HeaderAuths: []*HeaderAuthConfig{ + {Enabled: false, Header: "", Value: ""}, + {Enabled: true, Header: "X-Key", Value: "val"}, + }, + } + require.NoError(t, rp.Validate()) + }) + + t.Run("value too long rejected", func(t *testing.T) { + rp := validProxy() + rp.Auth = AuthConfig{ + HeaderAuths: []*HeaderAuthConfig{ + {Enabled: true, Header: "X-Key", Value: strings.Repeat("a", maxHeaderValueLen+1)}, + }, + } + err := rp.Validate() + require.Error(t, err) + assert.Contains(t, err.Error(), "exceeds maximum length") + }) +} diff --git a/management/internals/server/config/config.go b/management/internals/server/config/config.go index 0ba393263..fb9c842b7 100644 --- a/management/internals/server/config/config.go +++ b/management/internals/server/config/config.go @@ -203,7 +203,7 @@ type ReverseProxy struct { // AccessLogRetentionDays specifies the number of days to retain access logs. // Logs older than this duration will be automatically deleted during cleanup. - // A value of 0 or negative means logs are kept indefinitely (no cleanup). + // A value of 0 will default to 7 days. Negative means logs are kept indefinitely (no cleanup). AccessLogRetentionDays int // AccessLogCleanupIntervalHours specifies how often (in hours) to run the cleanup routine. 
diff --git a/management/internals/server/controllers.go b/management/internals/server/controllers.go index 62ed659c0..c7eab3d19 100644 --- a/management/internals/server/controllers.go +++ b/management/internals/server/controllers.go @@ -20,6 +20,7 @@ import ( "github.com/netbirdio/netbird/management/server/integrations/integrated_validator" "github.com/netbirdio/netbird/management/server/integrations/port_forwarding" "github.com/netbirdio/netbird/management/server/job" + nbjwt "github.com/netbirdio/netbird/shared/auth/jwt" ) func (s *BaseServer) PeersUpdateManager() network_map.PeersUpdateManager { @@ -71,6 +72,7 @@ func (s *BaseServer) AuthManager() auth.Manager { signingKeyRefreshEnabled := s.Config.HttpConfig.IdpSignKeyRefreshEnabled issuer := s.Config.HttpConfig.AuthIssuer userIDClaim := s.Config.HttpConfig.AuthUserIDClaim + var keyFetcher nbjwt.KeyFetcher // Use embedded IdP configuration if available if oauthProvider := s.OAuthConfigProvider(); oauthProvider != nil { @@ -78,8 +80,11 @@ func (s *BaseServer) AuthManager() auth.Manager { if len(audiences) > 0 { audience = audiences[0] // Use the first client ID as the primary audience } - // Use localhost keys location for internal validation (management has embedded Dex) - keysLocation = oauthProvider.GetLocalKeysLocation() + keyFetcher = oauthProvider.GetKeyFetcher() + // Fall back to default keys location if direct key fetching is not available + if keyFetcher == nil { + keysLocation = oauthProvider.GetLocalKeysLocation() + } signingKeyRefreshEnabled = true issuer = oauthProvider.GetIssuer() userIDClaim = oauthProvider.GetUserIDClaim() @@ -92,7 +97,8 @@ func (s *BaseServer) AuthManager() auth.Manager { keysLocation, userIDClaim, audiences, - signingKeyRefreshEnabled) + signingKeyRefreshEnabled, + keyFetcher) }) } diff --git a/management/internals/server/modules.go b/management/internals/server/modules.go index a32cf6046..374ea5c81 100644 --- a/management/internals/server/modules.go +++ 
b/management/internals/server/modules.go @@ -117,9 +117,11 @@ func (s *BaseServer) IdpManager() idp.Manager { return Create(s, func() idp.Manager { var idpManager idp.Manager var err error + // Use embedded IdP service if embedded Dex is configured and enabled. // Legacy IdpManager won't be used anymore even if configured. - if s.Config.EmbeddedIdP != nil && s.Config.EmbeddedIdP.Enabled { + embeddedEnabled := s.Config.EmbeddedIdP != nil && s.Config.EmbeddedIdP.Enabled + if embeddedEnabled { idpManager, err = idp.NewEmbeddedIdPManager(context.Background(), s.Config.EmbeddedIdP, s.Metrics()) if err != nil { log.Fatalf("failed to create embedded IDP service: %v", err) @@ -195,7 +197,7 @@ func (s *BaseServer) RecordsManager() records.Manager { func (s *BaseServer) ServiceManager() service.Manager { return Create(s, func() service.Manager { - return nbreverseproxy.NewManager(s.Store(), s.AccountManager(), s.PermissionsManager(), s.ServiceProxyController(), s.ReverseProxyDomainManager()) + return nbreverseproxy.NewManager(s.Store(), s.AccountManager(), s.PermissionsManager(), s.ServiceProxyController(), s.ProxyManager(), s.ReverseProxyDomainManager()) }) } @@ -212,9 +214,6 @@ func (s *BaseServer) ProxyManager() proxy.Manager { func (s *BaseServer) ReverseProxyDomainManager() *manager.Manager { return Create(s, func() *manager.Manager { m := manager.NewManager(s.Store(), s.ProxyManager(), s.PermissionsManager(), s.AccountManager()) - s.AfterInit(func(s *BaseServer) { - m.SetClusterCapabilities(s.ServiceProxyController()) - }) return &m }) } diff --git a/management/internals/shared/grpc/proxy.go b/management/internals/shared/grpc/proxy.go index 45bce424d..7f4575612 100644 --- a/management/internals/shared/grpc/proxy.go +++ b/management/internals/shared/grpc/proxy.go @@ -238,7 +238,14 @@ func (s *ProxyServiceServer) GetMappingUpdate(req *proto.GetMappingUpdateRequest cancel: cancel, } - if err := s.proxyManager.Connect(ctx, proxyID, proxyAddress, peerInfo, accountID); err 
!= nil { + var caps *proxy.Capabilities + if c := req.GetCapabilities(); c != nil { + caps = &proxy.Capabilities{ + SupportsCustomPorts: c.SupportsCustomPorts, + RequireSubdomain: c.RequireSubdomain, + } + } + if err := s.proxyManager.Connect(ctx, proxyID, proxyAddress, peerInfo, accountID, caps); err != nil { if accountID != nil { cancel() if errors.Is(err, proxy.ErrAccountProxyAlreadyExists) { @@ -254,6 +261,7 @@ func (s *ProxyServiceServer) GetMappingUpdate(req *proto.GetMappingUpdateRequest log.WithContext(ctx).Warnf("Failed to register proxy %s in cluster: %v", proxyID, err) } + log.WithFields(log.Fields{ "proxy_id": proxyID, "address": proxyAddress, @@ -381,6 +389,9 @@ func (s *ProxyServiceServer) snapshotServiceMappings(ctx context.Context, conn * } m := service.ToProtoMapping(rpservice.Create, token, s.GetOIDCValidationConfig()) + if !proxyAcceptsMapping(conn, m) { + continue + } mappings = append(mappings, m) } return mappings, nil @@ -580,25 +591,49 @@ func (s *ProxyServiceServer) SendServiceUpdateToCluster(ctx context.Context, upd log.Debugf("Sending service update to cluster %s", clusterAddr) for _, proxyID := range proxyIDs { - if connVal, ok := s.connectedProxies.Load(proxyID); ok { - conn := connVal.(*proxyConnection) - if conn.accountID != nil && update.AccountId != "" && *conn.accountID != update.AccountId { - continue - } - msg := s.perProxyMessage(updateResponse, proxyID) - if msg == nil { - continue - } - select { - case conn.sendChan <- msg: - log.WithContext(ctx).Debugf("Sent service update with id %s to proxy %s in cluster %s", update.Id, proxyID, clusterAddr) - default: - log.WithContext(ctx).Warnf("Failed to send service update to proxy %s in cluster %s (channel full)", proxyID, clusterAddr) - } + connVal, ok := s.connectedProxies.Load(proxyID) + if !ok { + continue + } + conn := connVal.(*proxyConnection) + if conn.accountID != nil && update.AccountId != "" && *conn.accountID != update.AccountId { + continue + } + if 
!proxyAcceptsMapping(conn, update) { + log.WithContext(ctx).Debugf("Skipping proxy %s: does not support custom ports for mapping %s", proxyID, update.Id) + continue + } + msg := s.perProxyMessage(updateResponse, proxyID) + if msg == nil { + continue + } + select { + case conn.sendChan <- msg: + log.WithContext(ctx).Debugf("Sent service update with id %s to proxy %s in cluster %s", update.Id, proxyID, clusterAddr) + default: + log.WithContext(ctx).Warnf("Failed to send service update to proxy %s in cluster %s (channel full)", proxyID, clusterAddr) } } } +// proxyAcceptsMapping returns whether the proxy should receive this mapping. +// Old proxies that never reported capabilities are skipped for non-TLS L4 +// mappings with a custom listen port, since they don't understand the +// protocol. Proxies that report capabilities (even SupportsCustomPorts=false) +// are new enough to handle the mapping. TLS uses SNI routing and works on +// any proxy. Delete operations are always sent so proxies can clean up. +func proxyAcceptsMapping(conn *proxyConnection, mapping *proto.ProxyMapping) bool { + if mapping.Type == proto.ProxyMappingUpdateType_UPDATE_TYPE_REMOVED { + return true + } + if mapping.ListenPort == 0 || mapping.Mode == "tls" { + return true + } + // Old proxies that never reported capabilities don't understand + // custom port mappings. + return conn.capabilities != nil && conn.capabilities.SupportsCustomPorts != nil +} + // perProxyMessage returns a copy of update with a fresh one-time token for // create/update operations. For delete operations the original mapping is // used unchanged because proxies do not need to authenticate for removal. @@ -646,64 +681,6 @@ func shallowCloneMapping(m *proto.ProxyMapping) *proto.ProxyMapping { } } -// ClusterSupportsCustomPorts returns whether any connected proxy in the given -// cluster reports custom port support. Returns nil if no proxy has reported -// capabilities (old proxies that predate the field). 
-func (s *ProxyServiceServer) ClusterSupportsCustomPorts(clusterAddr string) *bool { - if s.proxyController == nil { - return nil - } - - var hasCapabilities bool - for _, pid := range s.proxyController.GetProxiesForCluster(clusterAddr) { - connVal, ok := s.connectedProxies.Load(pid) - if !ok { - continue - } - conn := connVal.(*proxyConnection) - if conn.capabilities == nil || conn.capabilities.SupportsCustomPorts == nil { - continue - } - if *conn.capabilities.SupportsCustomPorts { - return ptr(true) - } - hasCapabilities = true - } - if hasCapabilities { - return ptr(false) - } - return nil -} - -// ClusterRequireSubdomain returns whether any connected proxy in the given -// cluster reports that a subdomain is required. Returns nil if no proxy has -// reported the capability (defaults to not required). -func (s *ProxyServiceServer) ClusterRequireSubdomain(clusterAddr string) *bool { - if s.proxyController == nil { - return nil - } - - var hasCapabilities bool - for _, pid := range s.proxyController.GetProxiesForCluster(clusterAddr) { - connVal, ok := s.connectedProxies.Load(pid) - if !ok { - continue - } - conn := connVal.(*proxyConnection) - if conn.capabilities == nil || conn.capabilities.RequireSubdomain == nil { - continue - } - if *conn.capabilities.RequireSubdomain { - return ptr(true) - } - hasCapabilities = true - } - if hasCapabilities { - return ptr(false) - } - return nil -} - func (s *ProxyServiceServer) Authenticate(ctx context.Context, req *proto.AuthenticateRequest) (*proto.AuthenticateResponse, error) { if err := enforceAccountScope(ctx, req.GetAccountId()); err != nil { return nil, err diff --git a/management/internals/shared/grpc/proxy_test.go b/management/internals/shared/grpc/proxy_test.go index 7c9c7d9b5..813bef5f1 100644 --- a/management/internals/shared/grpc/proxy_test.go +++ b/management/internals/shared/grpc/proxy_test.go @@ -56,14 +56,6 @@ func (c *testProxyController) UnregisterProxyFromCluster(_ context.Context, clus return nil } 
-func (c *testProxyController) ClusterSupportsCustomPorts(_ string) *bool { - return ptr(true) -} - -func (c *testProxyController) ClusterRequireSubdomain(_ string) *bool { - return nil -} - func (c *testProxyController) GetProxiesForCluster(clusterAddr string) []string { c.mu.Lock() defer c.mu.Unlock() @@ -410,14 +402,14 @@ func TestSendServiceUpdateToCluster_FiltersOnCapability(t *testing.T) { const cluster = "proxy.example.com" - // Proxy A supports custom ports. - chA := registerFakeProxyWithCaps(s, "proxy-a", cluster, &proto.ProxyCapabilities{SupportsCustomPorts: ptr(true)}) - // Proxy B does NOT support custom ports (shared cloud proxy). - chB := registerFakeProxyWithCaps(s, "proxy-b", cluster, &proto.ProxyCapabilities{SupportsCustomPorts: ptr(false)}) + // Modern proxy reports capabilities. + chModern := registerFakeProxyWithCaps(s, "proxy-modern", cluster, &proto.ProxyCapabilities{SupportsCustomPorts: ptr(true)}) + // Legacy proxy never reported capabilities (nil). + chLegacy := registerFakeProxy(s, "proxy-legacy", cluster) ctx := context.Background() - // TLS passthrough works on all proxies regardless of custom port support. + // TLS passthrough with custom port: all proxies receive it (SNI routing). tlsMapping := &proto.ProxyMapping{ Type: proto.ProxyMappingUpdateType_UPDATE_TYPE_CREATED, Id: "service-tls", @@ -430,12 +422,26 @@ func TestSendServiceUpdateToCluster_FiltersOnCapability(t *testing.T) { s.SendServiceUpdateToCluster(ctx, tlsMapping, cluster) - msgA := drainMapping(chA) - msgB := drainMapping(chB) - assert.NotNil(t, msgA, "proxy-a should receive TLS mapping") - assert.NotNil(t, msgB, "proxy-b should receive TLS mapping (passthrough works on all proxies)") + assert.NotNil(t, drainMapping(chModern), "modern proxy should receive TLS mapping") + assert.NotNil(t, drainMapping(chLegacy), "legacy proxy should receive TLS mapping (SNI works on all)") - // Send an HTTP mapping: both should receive it. 
+ // TCP mapping with custom port: only modern proxy receives it. + tcpMapping := &proto.ProxyMapping{ + Type: proto.ProxyMappingUpdateType_UPDATE_TYPE_CREATED, + Id: "service-tcp", + AccountId: "account-1", + Domain: "db.example.com", + Mode: "tcp", + ListenPort: 5432, + Path: []*proto.PathMapping{{Target: "10.0.0.5:5432"}}, + } + + s.SendServiceUpdateToCluster(ctx, tcpMapping, cluster) + + assert.NotNil(t, drainMapping(chModern), "modern proxy should receive TCP custom-port mapping") + assert.Nil(t, drainMapping(chLegacy), "legacy proxy should NOT receive TCP custom-port mapping") + + // HTTP mapping (no listen port): both receive it. httpMapping := &proto.ProxyMapping{ Type: proto.ProxyMappingUpdateType_UPDATE_TYPE_CREATED, Id: "service-http", @@ -446,10 +452,16 @@ func TestSendServiceUpdateToCluster_FiltersOnCapability(t *testing.T) { s.SendServiceUpdateToCluster(ctx, httpMapping, cluster) - msgA = drainMapping(chA) - msgB = drainMapping(chB) - assert.NotNil(t, msgA, "proxy-a should receive HTTP mapping") - assert.NotNil(t, msgB, "proxy-b should receive HTTP mapping") + assert.NotNil(t, drainMapping(chModern), "modern proxy should receive HTTP mapping") + assert.NotNil(t, drainMapping(chLegacy), "legacy proxy should receive HTTP mapping") + + // Proxy that reports SupportsCustomPorts=false still receives custom-port + // mappings because it understands the protocol (it's new enough). 
+ chNewNoCustom := registerFakeProxyWithCaps(s, "proxy-new-no-custom", cluster, &proto.ProxyCapabilities{SupportsCustomPorts: ptr(false)}) + + s.SendServiceUpdateToCluster(ctx, tcpMapping, cluster) + + assert.NotNil(t, drainMapping(chNewNoCustom), "new proxy with SupportsCustomPorts=false should still receive mapping") } func TestSendServiceUpdateToCluster_TLSNotFiltered(t *testing.T) { @@ -463,7 +475,8 @@ func TestSendServiceUpdateToCluster_TLSNotFiltered(t *testing.T) { const cluster = "proxy.example.com" - chShared := registerFakeProxyWithCaps(s, "proxy-shared", cluster, &proto.ProxyCapabilities{SupportsCustomPorts: ptr(false)}) + // Legacy proxy (no capabilities) still receives TLS since it uses SNI. + chLegacy := registerFakeProxy(s, "proxy-legacy", cluster) tlsMapping := &proto.ProxyMapping{ Type: proto.ProxyMappingUpdateType_UPDATE_TYPE_CREATED, @@ -476,8 +489,8 @@ func TestSendServiceUpdateToCluster_TLSNotFiltered(t *testing.T) { s.SendServiceUpdateToCluster(context.Background(), tlsMapping, cluster) - msg := drainMapping(chShared) - assert.NotNil(t, msg, "shared proxy should receive TLS mapping even without custom port support") + msg := drainMapping(chLegacy) + assert.NotNil(t, msg, "legacy proxy should receive TLS mapping (SNI works without custom port support)") } // TestServiceModifyNotifications exercises every possible modification @@ -644,7 +657,7 @@ func TestServiceModifyNotifications(t *testing.T) { s.SetProxyController(newTestProxyController()) const cluster = "proxy.example.com" chModern := registerFakeProxyWithCaps(s, "modern", cluster, &proto.ProxyCapabilities{SupportsCustomPorts: ptr(true)}) - chLegacy := registerFakeProxyWithCaps(s, "legacy", cluster, &proto.ProxyCapabilities{SupportsCustomPorts: ptr(false)}) + chLegacy := registerFakeProxy(s, "legacy", cluster) // TLS passthrough works on all proxies regardless of custom port support s.SendServiceUpdateToCluster(ctx, tlsOnlyMapping(proto.ProxyMappingUpdateType_UPDATE_TYPE_MODIFIED), 
cluster) @@ -663,7 +676,7 @@ func TestServiceModifyNotifications(t *testing.T) { } s.SetProxyController(newTestProxyController()) const cluster = "proxy.example.com" - chLegacy := registerFakeProxyWithCaps(s, "legacy", cluster, &proto.ProxyCapabilities{SupportsCustomPorts: ptr(false)}) + chLegacy := registerFakeProxy(s, "legacy", cluster) mapping := tlsOnlyMapping(proto.ProxyMappingUpdateType_UPDATE_TYPE_MODIFIED) mapping.ListenPort = 0 // default port diff --git a/management/server/account.go b/management/server/account.go index 75db36a5f..d90b46659 100644 --- a/management/server/account.go +++ b/management/server/account.go @@ -742,11 +742,6 @@ func (am *DefaultAccountManager) DeleteAccount(ctx context.Context, accountID, u return status.Errorf(status.Internal, "failed to build user infos for account %s: %v", accountID, err) } - err = am.serviceManager.DeleteAllServices(ctx, accountID, userID) - if err != nil { - return status.Errorf(status.Internal, "failed to delete service %s: %v", accountID, err) - } - for _, otherUser := range account.Users { if otherUser.Id == userID { continue diff --git a/management/server/account/manager.go b/management/server/account/manager.go index 45af63ae8..b4516d512 100644 --- a/management/server/account/manager.go +++ b/management/server/account/manager.go @@ -75,7 +75,7 @@ type Manager interface { GetUsersFromAccount(ctx context.Context, accountID, userID string) (map[string]*types.UserInfo, error) GetGroup(ctx context.Context, accountId, groupID, userID string) (*types.Group, error) GetAllGroups(ctx context.Context, accountID, userID string) ([]*types.Group, error) - GetGroupByName(ctx context.Context, groupName, accountID string) (*types.Group, error) + GetGroupByName(ctx context.Context, groupName, accountID, userID string) (*types.Group, error) CreateGroup(ctx context.Context, accountID, userID string, group *types.Group) error UpdateGroup(ctx context.Context, accountID, userID string, group *types.Group) error 
CreateGroups(ctx context.Context, accountID, userID string, newGroups []*types.Group) error diff --git a/management/server/account/manager_mock.go b/management/server/account/manager_mock.go index 90700c795..36e5fe39f 100644 --- a/management/server/account/manager_mock.go +++ b/management/server/account/manager_mock.go @@ -736,18 +736,18 @@ func (mr *MockManagerMockRecorder) GetGroup(ctx, accountId, groupID, userID inte } // GetGroupByName mocks base method. -func (m *MockManager) GetGroupByName(ctx context.Context, groupName, accountID string) (*types.Group, error) { +func (m *MockManager) GetGroupByName(ctx context.Context, groupName, accountID, userID string) (*types.Group, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetGroupByName", ctx, groupName, accountID) + ret := m.ctrl.Call(m, "GetGroupByName", ctx, groupName, accountID, userID) ret0, _ := ret[0].(*types.Group) ret1, _ := ret[1].(error) return ret0, ret1 } // GetGroupByName indicates an expected call of GetGroupByName. -func (mr *MockManagerMockRecorder) GetGroupByName(ctx, groupName, accountID interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) GetGroupByName(ctx, groupName, accountID, userID interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGroupByName", reflect.TypeOf((*MockManager)(nil).GetGroupByName), ctx, groupName, accountID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGroupByName", reflect.TypeOf((*MockManager)(nil).GetGroupByName), ctx, groupName, accountID, userID) } // GetIdentityProvider mocks base method. 
diff --git a/management/server/account_test.go b/management/server/account_test.go index fb42a47bf..3147b3f56 100644 --- a/management/server/account_test.go +++ b/management/server/account_test.go @@ -15,7 +15,6 @@ import ( "time" "github.com/golang/mock/gomock" - "github.com/netbirdio/netbird/shared/management/status" "github.com/prometheus/client_golang/prometheus/push" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" @@ -23,6 +22,9 @@ import ( "go.opentelemetry.io/otel/metric/noop" "golang.zx2c4.com/wireguard/wgctrl/wgtypes" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain" + "github.com/netbirdio/netbird/shared/management/status" + nbdns "github.com/netbirdio/netbird/dns" "github.com/netbirdio/netbird/management/internals/controllers/network_map" "github.com/netbirdio/netbird/management/internals/controllers/network_map/controller" @@ -1815,6 +1817,13 @@ func TestAccount_Copy(t *testing.T) { Targets: []*service.Target{}, }, }, + Domains: []*domain.Domain{ + { + ID: "domain1", + Domain: "test.com", + AccountID: "account1", + }, + }, NetworkMapCache: &types.NetworkMapBuilder{}, } account.InitOnce() @@ -3138,7 +3147,7 @@ func createManager(t testing.TB) (*DefaultAccountManager, *update_channel.PeersU if err != nil { return nil, nil, err } - manager.SetServiceManager(reverseproxymanager.NewManager(store, manager, permissionsManager, proxyController, nil)) + manager.SetServiceManager(reverseproxymanager.NewManager(store, manager, permissionsManager, proxyController, proxyManager, nil)) return manager, updateManager, nil } diff --git a/management/server/activity/store/sql_store_idp_migration.go b/management/server/activity/store/sql_store_idp_migration.go new file mode 100644 index 000000000..1b3a9ecd9 --- /dev/null +++ b/management/server/activity/store/sql_store_idp_migration.go @@ -0,0 +1,61 @@ +package store + +// This file contains migration-only methods on Store. 
+// They satisfy the migration.MigrationEventStore interface via duck typing. +// Delete this file when migration tooling is no longer needed. + +import ( + "context" + "fmt" + + "gorm.io/gorm" + + "github.com/netbirdio/netbird/management/server/activity" + "github.com/netbirdio/netbird/management/server/idp/migration" +) + +// CheckSchema verifies that all tables and columns required by the migration exist in the event database. +func (store *Store) CheckSchema(checks []migration.SchemaCheck) []migration.SchemaError { + migrator := store.db.Migrator() + var errs []migration.SchemaError + + for _, check := range checks { + if !migrator.HasTable(check.Table) { + errs = append(errs, migration.SchemaError{Table: check.Table}) + continue + } + for _, col := range check.Columns { + if !migrator.HasColumn(check.Table, col) { + errs = append(errs, migration.SchemaError{Table: check.Table, Column: col}) + } + } + } + + return errs +} + +// UpdateUserID updates all references to oldUserID in events and deleted_users tables. +func (store *Store) UpdateUserID(ctx context.Context, oldUserID, newUserID string) error { + return store.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error { + if err := tx.Model(&activity.Event{}). + Where("initiator_id = ?", oldUserID). + Update("initiator_id", newUserID).Error; err != nil { + return fmt.Errorf("update events.initiator_id: %w", err) + } + + if err := tx.Model(&activity.Event{}). + Where("target_id = ?", oldUserID). + Update("target_id", newUserID).Error; err != nil { + return fmt.Errorf("update events.target_id: %w", err) + } + + // Raw exec: GORM can't update a PK via Model().Update() + if err := tx.Exec( + "UPDATE deleted_users SET id = ? 
WHERE id = ?", newUserID, oldUserID, + ).Error; err != nil { + return fmt.Errorf("update deleted_users.id: %w", err) + } + + return nil + }) +} diff --git a/management/server/activity/store/sql_store_idp_migration_test.go b/management/server/activity/store/sql_store_idp_migration_test.go new file mode 100644 index 000000000..98b6e1327 --- /dev/null +++ b/management/server/activity/store/sql_store_idp_migration_test.go @@ -0,0 +1,161 @@ +package store + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/netbirdio/netbird/management/server/activity" + "github.com/netbirdio/netbird/util/crypt" +) + +func TestUpdateUserID(t *testing.T) { + ctx := context.Background() + + newStore := func(t *testing.T) *Store { + t.Helper() + key, _ := crypt.GenerateKey() + s, err := NewSqlStore(ctx, t.TempDir(), key) + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { s.Close(ctx) }) //nolint + return s + } + + t.Run("updates initiator_id in events", func(t *testing.T) { + store := newStore(t) + accountID := "account_1" + + _, err := store.Save(ctx, &activity.Event{ + Timestamp: time.Now().UTC(), + Activity: activity.PeerAddedByUser, + InitiatorID: "old-user", + TargetID: "some-peer", + AccountID: accountID, + }) + assert.NoError(t, err) + + err = store.UpdateUserID(ctx, "old-user", "new-user") + assert.NoError(t, err) + + result, err := store.Get(ctx, accountID, 0, 10, false) + assert.NoError(t, err) + assert.Len(t, result, 1) + assert.Equal(t, "new-user", result[0].InitiatorID) + }) + + t.Run("updates target_id in events", func(t *testing.T) { + store := newStore(t) + accountID := "account_1" + + _, err := store.Save(ctx, &activity.Event{ + Timestamp: time.Now().UTC(), + Activity: activity.PeerAddedByUser, + InitiatorID: "some-admin", + TargetID: "old-user", + AccountID: accountID, + }) + assert.NoError(t, err) + + err = store.UpdateUserID(ctx, "old-user", "new-user") + assert.NoError(t, err) + + result, err := store.Get(ctx, 
accountID, 0, 10, false) + assert.NoError(t, err) + assert.Len(t, result, 1) + assert.Equal(t, "new-user", result[0].TargetID) + }) + + t.Run("updates deleted_users id", func(t *testing.T) { + store := newStore(t) + accountID := "account_1" + + // Save an event with email/name meta to create a deleted_users row for "old-user" + _, err := store.Save(ctx, &activity.Event{ + Timestamp: time.Now().UTC(), + Activity: activity.PeerAddedByUser, + InitiatorID: "admin", + TargetID: "old-user", + AccountID: accountID, + Meta: map[string]any{ + "email": "user@example.com", + "name": "Test User", + }, + }) + assert.NoError(t, err) + + err = store.UpdateUserID(ctx, "old-user", "new-user") + assert.NoError(t, err) + + // Save another event referencing new-user with email/name meta. + // This should upsert (not conflict) because the PK was already migrated. + _, err = store.Save(ctx, &activity.Event{ + Timestamp: time.Now().UTC(), + Activity: activity.PeerAddedByUser, + InitiatorID: "admin", + TargetID: "new-user", + AccountID: accountID, + Meta: map[string]any{ + "email": "user@example.com", + "name": "Test User", + }, + }) + assert.NoError(t, err) + + // The deleted user info should be retrievable via Get (joined on target_id) + result, err := store.Get(ctx, accountID, 0, 10, false) + assert.NoError(t, err) + assert.Len(t, result, 2) + for _, ev := range result { + assert.Equal(t, "new-user", ev.TargetID) + } + }) + + t.Run("no-op when old user ID does not exist", func(t *testing.T) { + store := newStore(t) + + err := store.UpdateUserID(ctx, "nonexistent-user", "new-user") + assert.NoError(t, err) + }) + + t.Run("only updates matching user leaves others unchanged", func(t *testing.T) { + store := newStore(t) + accountID := "account_1" + + _, err := store.Save(ctx, &activity.Event{ + Timestamp: time.Now().UTC(), + Activity: activity.PeerAddedByUser, + InitiatorID: "user-a", + TargetID: "peer-1", + AccountID: accountID, + }) + assert.NoError(t, err) + + _, err = store.Save(ctx, 
&activity.Event{ + Timestamp: time.Now().UTC(), + Activity: activity.PeerAddedByUser, + InitiatorID: "user-b", + TargetID: "peer-2", + AccountID: accountID, + }) + assert.NoError(t, err) + + err = store.UpdateUserID(ctx, "user-a", "user-a-new") + assert.NoError(t, err) + + result, err := store.Get(ctx, accountID, 0, 10, false) + assert.NoError(t, err) + assert.Len(t, result, 2) + + for _, ev := range result { + if ev.TargetID == "peer-1" { + assert.Equal(t, "user-a-new", ev.InitiatorID) + } else { + assert.Equal(t, "user-b", ev.InitiatorID) + } + } + }) +} diff --git a/management/server/auth/manager.go b/management/server/auth/manager.go index 76cc750b6..27346a604 100644 --- a/management/server/auth/manager.go +++ b/management/server/auth/manager.go @@ -33,15 +33,20 @@ type manager struct { extractor *nbjwt.ClaimsExtractor } -func NewManager(store store.Store, issuer, audience, keysLocation, userIdClaim string, allAudiences []string, idpRefreshKeys bool) Manager { - // @note if invalid/missing parameters are sent the validator will instantiate - // but it will fail when validating and parsing the token - jwtValidator := nbjwt.NewValidator( - issuer, - allAudiences, - keysLocation, - idpRefreshKeys, - ) +func NewManager(store store.Store, issuer, audience, keysLocation, userIdClaim string, allAudiences []string, idpRefreshKeys bool, keyFetcher nbjwt.KeyFetcher) Manager { + var jwtValidator *nbjwt.Validator + if keyFetcher != nil { + jwtValidator = nbjwt.NewValidatorWithKeyFetcher(issuer, allAudiences, keyFetcher) + } else { + // @note if invalid/missing parameters are sent the validator will instantiate + // but it will fail when validating and parsing the token + jwtValidator = nbjwt.NewValidator( + issuer, + allAudiences, + keysLocation, + idpRefreshKeys, + ) + } claimsExtractor := nbjwt.NewClaimsExtractor( nbjwt.WithAudience(audience), diff --git a/management/server/auth/manager_test.go b/management/server/auth/manager_test.go index b9f091b1e..469737f47 100644 
--- a/management/server/auth/manager_test.go +++ b/management/server/auth/manager_test.go @@ -52,7 +52,7 @@ func TestAuthManager_GetAccountInfoFromPAT(t *testing.T) { t.Fatalf("Error when saving account: %s", err) } - manager := auth.NewManager(store, "", "", "", "", []string{}, false) + manager := auth.NewManager(store, "", "", "", "", []string{}, false, nil) user, pat, _, _, err := manager.GetPATInfo(context.Background(), token) if err != nil { @@ -92,7 +92,7 @@ func TestAuthManager_MarkPATUsed(t *testing.T) { t.Fatalf("Error when saving account: %s", err) } - manager := auth.NewManager(store, "", "", "", "", []string{}, false) + manager := auth.NewManager(store, "", "", "", "", []string{}, false, nil) err = manager.MarkPATUsed(context.Background(), "tokenId") if err != nil { @@ -142,7 +142,7 @@ func TestAuthManager_EnsureUserAccessByJWTGroups(t *testing.T) { // these tests only assert groups are parsed from token as per account settings token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"idp-groups": []interface{}{"group1", "group2"}}) - manager := auth.NewManager(store, "", "", "", "", []string{}, false) + manager := auth.NewManager(store, "", "", "", "", []string{}, false, nil) t.Run("JWT groups disabled", func(t *testing.T) { userAuth, err := manager.EnsureUserAccessByJWTGroups(context.Background(), userAuth, token) @@ -225,7 +225,7 @@ func TestAuthManager_ValidateAndParseToken(t *testing.T) { keyId := "test-key" // note, we can use a nil store because ValidateAndParseToken does not use it in it's flow - manager := auth.NewManager(nil, issuer, audience, server.URL, userIdClaim, []string{audience}, false) + manager := auth.NewManager(nil, issuer, audience, server.URL, userIdClaim, []string{audience}, false, nil) customClaim := func(name string) string { return fmt.Sprintf("%s/%s", audience, name) diff --git a/management/server/geolocation/geolocation.go b/management/server/geolocation/geolocation.go index 30fd493e8..0af3ce2f6 100644 --- 
a/management/server/geolocation/geolocation.go +++ b/management/server/geolocation/geolocation.go @@ -130,6 +130,10 @@ func (gl *geolocationImpl) Lookup(ip net.IP) (*Record, error) { gl.mux.RLock() defer gl.mux.RUnlock() + if gl.db == nil { + return nil, fmt.Errorf("geolocation database is not available") + } + var record Record err := gl.db.Lookup(ip, &record) if err != nil { @@ -173,8 +177,14 @@ func (gl *geolocationImpl) GetCitiesByCountry(countryISOCode string) ([]City, er func (gl *geolocationImpl) Stop() error { close(gl.stopCh) - if gl.db != nil { - if err := gl.db.Close(); err != nil { + + gl.mux.Lock() + db := gl.db + gl.db = nil + gl.mux.Unlock() + + if db != nil { + if err := db.Close(); err != nil { return err } } diff --git a/management/server/group.go b/management/server/group.go index 326b167cf..7b5b9b86c 100644 --- a/management/server/group.go +++ b/management/server/group.go @@ -61,7 +61,10 @@ func (am *DefaultAccountManager) GetAllGroups(ctx context.Context, accountID, us } // GetGroupByName filters all groups in an account by name and returns the one with the most peers -func (am *DefaultAccountManager) GetGroupByName(ctx context.Context, groupName, accountID string) (*types.Group, error) { +func (am *DefaultAccountManager) GetGroupByName(ctx context.Context, groupName, accountID, userID string) (*types.Group, error) { + if err := am.CheckGroupPermissions(ctx, accountID, userID); err != nil { + return nil, err + } return am.Store.GetGroupByName(ctx, store.LockingStrengthNone, accountID, groupName) } diff --git a/management/server/http/handlers/groups/groups_handler.go b/management/server/http/handlers/groups/groups_handler.go index 56ccc9d0b..f8d161a87 100644 --- a/management/server/http/handlers/groups/groups_handler.go +++ b/management/server/http/handlers/groups/groups_handler.go @@ -52,7 +52,7 @@ func (h *handler) getAllGroups(w http.ResponseWriter, r *http.Request) { groupName := r.URL.Query().Get("name") if groupName != "" { // Get single 
group by name - group, err := h.accountManager.GetGroupByName(r.Context(), groupName, accountID) + group, err := h.accountManager.GetGroupByName(r.Context(), groupName, accountID, userID) if err != nil { util.WriteError(r.Context(), err, w) return @@ -118,7 +118,7 @@ func (h *handler) updateGroup(w http.ResponseWriter, r *http.Request) { return } - allGroup, err := h.accountManager.GetGroupByName(r.Context(), "All", accountID) + allGroup, err := h.accountManager.GetGroupByName(r.Context(), "All", accountID, userID) if err != nil { util.WriteError(r.Context(), err, w) return diff --git a/management/server/http/handlers/groups/groups_handler_test.go b/management/server/http/handlers/groups/groups_handler_test.go index 458a15c11..c7b4cbcdd 100644 --- a/management/server/http/handlers/groups/groups_handler_test.go +++ b/management/server/http/handlers/groups/groups_handler_test.go @@ -71,7 +71,7 @@ func initGroupTestData(initGroups ...*types.Group) *handler { return groups, nil }, - GetGroupByNameFunc: func(ctx context.Context, groupName, _ string) (*types.Group, error) { + GetGroupByNameFunc: func(ctx context.Context, groupName, _, _ string) (*types.Group, error) { if groupName == "All" { return &types.Group{ID: "id-all", Name: "All", Issued: types.GroupIssuedAPI}, nil } diff --git a/management/server/http/testing/integration/accounts_handler_integration_test.go b/management/server/http/testing/integration/accounts_handler_integration_test.go new file mode 100644 index 000000000..511730ee5 --- /dev/null +++ b/management/server/http/testing/integration/accounts_handler_integration_test.go @@ -0,0 +1,238 @@ +//go:build integration + +package integration + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/netbirdio/netbird/management/server/http/testing/testing_tools" + "github.com/netbirdio/netbird/management/server/http/testing/testing_tools/channel" + 
"github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/shared/management/http/api" +) + +func Test_Accounts_GetAll(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, true}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + for _, user := range users { + t.Run(user.name+" - Get all accounts", func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/accounts.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/accounts", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, http.StatusOK, user.expectResponse) + if !expectResponse { + return + } + + got := []api.Account{} + if err := json.Unmarshal(content, &got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + + assert.Equal(t, 1, len(got)) + account := got[0] + assert.Equal(t, "test.com", account.Domain) + assert.Equal(t, "private", account.DomainCategory) + assert.Equal(t, true, account.Settings.PeerLoginExpirationEnabled) + assert.Equal(t, 86400, account.Settings.PeerLoginExpiration) + assert.Equal(t, false, account.Settings.RegularUsersViewBlocked) + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } +} + +func Test_Accounts_Update(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", 
testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + trueVal := true + falseVal := false + + tt := []struct { + name string + expectedStatus int + requestBody *api.AccountRequest + verifyResponse func(t *testing.T, account *api.Account) + verifyDB func(t *testing.T, account *types.Account) + }{ + { + name: "Disable peer login expiration", + requestBody: &api.AccountRequest{ + Settings: api.AccountSettings{ + PeerLoginExpirationEnabled: false, + PeerLoginExpiration: 86400, + }, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, account *api.Account) { + t.Helper() + assert.Equal(t, false, account.Settings.PeerLoginExpirationEnabled) + }, + verifyDB: func(t *testing.T, dbAccount *types.Account) { + t.Helper() + assert.Equal(t, false, dbAccount.Settings.PeerLoginExpirationEnabled) + }, + }, + { + name: "Update peer login expiration to 48h", + requestBody: &api.AccountRequest{ + Settings: api.AccountSettings{ + PeerLoginExpirationEnabled: true, + PeerLoginExpiration: 172800, + }, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, account *api.Account) { + t.Helper() + assert.Equal(t, 172800, account.Settings.PeerLoginExpiration) + }, + verifyDB: func(t *testing.T, dbAccount *types.Account) { + t.Helper() + assert.Equal(t, 172800*time.Second, dbAccount.Settings.PeerLoginExpiration) + }, + }, + { + name: "Enable regular users view blocked", + requestBody: &api.AccountRequest{ + Settings: api.AccountSettings{ + PeerLoginExpirationEnabled: true, + PeerLoginExpiration: 86400, + RegularUsersViewBlocked: true, + }, + }, + expectedStatus: 
http.StatusOK, + verifyResponse: func(t *testing.T, account *api.Account) { + t.Helper() + assert.Equal(t, true, account.Settings.RegularUsersViewBlocked) + }, + verifyDB: func(t *testing.T, dbAccount *types.Account) { + t.Helper() + assert.Equal(t, true, dbAccount.Settings.RegularUsersViewBlocked) + }, + }, + { + name: "Enable groups propagation", + requestBody: &api.AccountRequest{ + Settings: api.AccountSettings{ + PeerLoginExpirationEnabled: true, + PeerLoginExpiration: 86400, + GroupsPropagationEnabled: &trueVal, + }, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, account *api.Account) { + t.Helper() + assert.NotNil(t, account.Settings.GroupsPropagationEnabled) + assert.Equal(t, true, *account.Settings.GroupsPropagationEnabled) + }, + verifyDB: func(t *testing.T, dbAccount *types.Account) { + t.Helper() + assert.Equal(t, true, dbAccount.Settings.GroupsPropagationEnabled) + }, + }, + { + name: "Enable JWT groups", + requestBody: &api.AccountRequest{ + Settings: api.AccountSettings{ + PeerLoginExpirationEnabled: true, + PeerLoginExpiration: 86400, + GroupsPropagationEnabled: &falseVal, + JwtGroupsEnabled: &trueVal, + JwtGroupsClaimName: stringPointer("groups"), + }, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, account *api.Account) { + t.Helper() + assert.NotNil(t, account.Settings.JwtGroupsEnabled) + assert.Equal(t, true, *account.Settings.JwtGroupsEnabled) + assert.NotNil(t, account.Settings.JwtGroupsClaimName) + assert.Equal(t, "groups", *account.Settings.JwtGroupsClaimName) + }, + verifyDB: func(t *testing.T, dbAccount *types.Account) { + t.Helper() + assert.Equal(t, true, dbAccount.Settings.JWTGroupsEnabled) + assert.Equal(t, "groups", dbAccount.Settings.JWTGroupsClaimName) + }, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/accounts.sql", nil, false) + 
+ body, err := json.Marshal(tc.requestBody) + if err != nil { + t.Fatalf("Failed to marshal request body: %v", err) + } + + req := testing_tools.BuildRequest(t, body, http.MethodPut, strings.Replace("/api/accounts/{accountId}", "{accountId}", testing_tools.TestAccountId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + got := &api.Account{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + + assert.Equal(t, testing_tools.TestAccountId, got.Id) + assert.Equal(t, "test.com", got.Domain) + tc.verifyResponse(t, got) + + db := testing_tools.GetDB(t, am.GetStore()) + dbAccount := testing_tools.VerifyAccountSettings(t, db) + tc.verifyDB(t, dbAccount) + }) + } + } +} + +func stringPointer(s string) *string { + return &s +} diff --git a/management/server/http/testing/integration/dns_handler_integration_test.go b/management/server/http/testing/integration/dns_handler_integration_test.go new file mode 100644 index 000000000..7ada5e462 --- /dev/null +++ b/management/server/http/testing/integration/dns_handler_integration_test.go @@ -0,0 +1,554 @@ +//go:build integration + +package integration + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/netbirdio/netbird/management/server/http/testing/testing_tools" + "github.com/netbirdio/netbird/management/server/http/testing/testing_tools/channel" + "github.com/netbirdio/netbird/shared/management/http/api" +) + +func Test_Nameservers_GetAll(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", 
testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + for _, user := range users { + t.Run(user.name+" - Get all nameservers", func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/dns.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/dns/nameservers", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, http.StatusOK, user.expectResponse) + if !expectResponse { + return + } + + got := []api.NameserverGroup{} + if err := json.Unmarshal(content, &got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + + assert.Equal(t, 1, len(got)) + assert.Equal(t, "testNSGroup", got[0].Name) + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } +} + +func Test_Nameservers_GetById(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + nsGroupId string + expectedStatus int + expectGroup bool + }{ + { + name: "Get existing nameserver group", + nsGroupId: "testNSGroupId", + 
expectedStatus: http.StatusOK, + expectGroup: true, + }, + { + name: "Get non-existing nameserver group", + nsGroupId: "nonExistingNSGroupId", + expectedStatus: http.StatusNotFound, + expectGroup: false, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/dns.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, strings.Replace("/api/dns/nameservers/{nsgroupId}", "{nsgroupId}", tc.nsGroupId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.expectGroup { + got := &api.NameserverGroup{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + assert.Equal(t, "testNSGroupId", got.Id) + assert.Equal(t, "testNSGroup", got.Name) + } + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } + } +} + +func Test_Nameservers_Create(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + requestBody *api.PostApiDnsNameserversJSONRequestBody + expectedStatus int + verifyResponse func(t *testing.T, nsGroup *api.NameserverGroup) + }{ + { + 
name: "Create nameserver group with single NS", + requestBody: &api.PostApiDnsNameserversJSONRequestBody{ + Name: "newNSGroup", + Description: "a new nameserver group", + Nameservers: []api.Nameserver{ + {Ip: "8.8.8.8", NsType: "udp", Port: 53}, + }, + Groups: []string{testing_tools.TestGroupId}, + Primary: false, + Domains: []string{"test.com"}, + Enabled: true, + SearchDomainsEnabled: false, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, nsGroup *api.NameserverGroup) { + t.Helper() + assert.NotEmpty(t, nsGroup.Id) + assert.Equal(t, "newNSGroup", nsGroup.Name) + assert.Equal(t, 1, len(nsGroup.Nameservers)) + assert.Equal(t, false, nsGroup.Primary) + }, + }, + { + name: "Create primary nameserver group", + requestBody: &api.PostApiDnsNameserversJSONRequestBody{ + Name: "primaryNS", + Description: "primary nameserver", + Nameservers: []api.Nameserver{ + {Ip: "1.1.1.1", NsType: "udp", Port: 53}, + }, + Groups: []string{testing_tools.TestGroupId}, + Primary: true, + Domains: []string{}, + Enabled: true, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, nsGroup *api.NameserverGroup) { + t.Helper() + assert.Equal(t, true, nsGroup.Primary) + }, + }, + { + name: "Create nameserver group with empty groups", + requestBody: &api.PostApiDnsNameserversJSONRequestBody{ + Name: "emptyGroupsNS", + Description: "no groups", + Nameservers: []api.Nameserver{ + {Ip: "8.8.8.8", NsType: "udp", Port: 53}, + }, + Groups: []string{}, + Primary: true, + Domains: []string{}, + Enabled: true, + }, + expectedStatus: http.StatusUnprocessableEntity, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/dns.sql", nil, false) + + body, err := json.Marshal(tc.requestBody) + if err != nil { + t.Fatalf("Failed to marshal request body: %v", err) + } + + req := testing_tools.BuildRequest(t, body, 
http.MethodPost, "/api/dns/nameservers", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.NameserverGroup{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + + // Verify the created NS group directly in the DB + db := testing_tools.GetDB(t, am.GetStore()) + dbNS := testing_tools.VerifyNSGroupInDB(t, db, got.Id) + assert.Equal(t, got.Name, dbNS.Name) + assert.Equal(t, got.Primary, dbNS.Primary) + assert.Equal(t, len(got.Nameservers), len(dbNS.NameServers)) + assert.Equal(t, got.Enabled, dbNS.Enabled) + assert.Equal(t, got.SearchDomainsEnabled, dbNS.SearchDomainsEnabled) + } + }) + } + } +} + +func Test_Nameservers_Update(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + nsGroupId string + requestBody *api.PutApiDnsNameserversNsgroupIdJSONRequestBody + expectedStatus int + verifyResponse func(t *testing.T, nsGroup *api.NameserverGroup) + }{ + { + name: "Update nameserver group name", + nsGroupId: "testNSGroupId", + requestBody: &api.PutApiDnsNameserversNsgroupIdJSONRequestBody{ + Name: "updatedNSGroup", + Description: "updated description", + Nameservers: []api.Nameserver{ + {Ip: "1.1.1.1", NsType: "udp", 
Port: 53}, + }, + Groups: []string{testing_tools.TestGroupId}, + Primary: false, + Domains: []string{"example.com"}, + Enabled: true, + SearchDomainsEnabled: false, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, nsGroup *api.NameserverGroup) { + t.Helper() + assert.Equal(t, "updatedNSGroup", nsGroup.Name) + assert.Equal(t, "updated description", nsGroup.Description) + }, + }, + { + name: "Update non-existing nameserver group", + nsGroupId: "nonExistingNSGroupId", + requestBody: &api.PutApiDnsNameserversNsgroupIdJSONRequestBody{ + Name: "whatever", + Nameservers: []api.Nameserver{ + {Ip: "1.1.1.1", NsType: "udp", Port: 53}, + }, + Groups: []string{testing_tools.TestGroupId}, + Primary: true, + Domains: []string{}, + Enabled: true, + }, + expectedStatus: http.StatusNotFound, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/dns.sql", nil, false) + + body, err := json.Marshal(tc.requestBody) + if err != nil { + t.Fatalf("Failed to marshal request body: %v", err) + } + + req := testing_tools.BuildRequest(t, body, http.MethodPut, strings.Replace("/api/dns/nameservers/{nsgroupId}", "{nsgroupId}", tc.nsGroupId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.NameserverGroup{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + + // Verify the updated NS group directly in the DB + db := testing_tools.GetDB(t, am.GetStore()) + dbNS := testing_tools.VerifyNSGroupInDB(t, db, tc.nsGroupId) + assert.Equal(t, "updatedNSGroup", dbNS.Name) + assert.Equal(t, "updated 
description", dbNS.Description) + assert.Equal(t, false, dbNS.Primary) + assert.Equal(t, true, dbNS.Enabled) + assert.Equal(t, 1, len(dbNS.NameServers)) + assert.Equal(t, false, dbNS.SearchDomainsEnabled) + } + }) + } + } +} + +func Test_Nameservers_Delete(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + nsGroupId string + expectedStatus int + }{ + { + name: "Delete existing nameserver group", + nsGroupId: "testNSGroupId", + expectedStatus: http.StatusOK, + }, + { + name: "Delete non-existing nameserver group", + nsGroupId: "nonExistingNSGroupId", + expectedStatus: http.StatusNotFound, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/dns.sql", nil, false) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodDelete, strings.Replace("/api/dns/nameservers/{nsgroupId}", "{nsgroupId}", tc.nsGroupId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + + // Verify deletion in DB for successful deletes by privileged users + if tc.expectedStatus == http.StatusOK && user.expectResponse { + db := testing_tools.GetDB(t, am.GetStore()) + testing_tools.VerifyNSGroupNotInDB(t, db, tc.nsGroupId) + } + }) + } + } +} + +func Test_DnsSettings_Get(t *testing.T) { + users := 
[]struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + for _, user := range users { + t.Run(user.name+" - Get DNS settings", func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/dns.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/dns/settings", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, http.StatusOK, user.expectResponse) + if !expectResponse { + return + } + + got := &api.DNSSettings{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + + assert.NotNil(t, got.DisabledManagementGroups) + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } +} + +func Test_DnsSettings_Update(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string 
+ requestBody *api.PutApiDnsSettingsJSONRequestBody + expectedStatus int + verifyResponse func(t *testing.T, settings *api.DNSSettings) + expectedDBDisabledMgmtLen int + expectedDBDisabledMgmtItem string + }{ + { + name: "Update disabled management groups", + requestBody: &api.PutApiDnsSettingsJSONRequestBody{ + DisabledManagementGroups: []string{testing_tools.TestGroupId}, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, settings *api.DNSSettings) { + t.Helper() + assert.Equal(t, 1, len(settings.DisabledManagementGroups)) + assert.Equal(t, testing_tools.TestGroupId, settings.DisabledManagementGroups[0]) + }, + expectedDBDisabledMgmtLen: 1, + expectedDBDisabledMgmtItem: testing_tools.TestGroupId, + }, + { + name: "Update with empty disabled management groups", + requestBody: &api.PutApiDnsSettingsJSONRequestBody{ + DisabledManagementGroups: []string{}, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, settings *api.DNSSettings) { + t.Helper() + assert.Equal(t, 0, len(settings.DisabledManagementGroups)) + }, + expectedDBDisabledMgmtLen: 0, + }, + { + name: "Update with non-existing group", + requestBody: &api.PutApiDnsSettingsJSONRequestBody{ + DisabledManagementGroups: []string{"nonExistingGroupId"}, + }, + expectedStatus: http.StatusUnprocessableEntity, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/dns.sql", nil, false) + + body, err := json.Marshal(tc.requestBody) + if err != nil { + t.Fatalf("Failed to marshal request body: %v", err) + } + + req := testing_tools.BuildRequest(t, body, http.MethodPut, "/api/dns/settings", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if 
tc.verifyResponse != nil { + got := &api.DNSSettings{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + + // Verify DNS settings directly in the DB + db := testing_tools.GetDB(t, am.GetStore()) + dbAccount := testing_tools.VerifyAccountSettings(t, db) + assert.Equal(t, tc.expectedDBDisabledMgmtLen, len(dbAccount.DNSSettings.DisabledManagementGroups)) + if tc.expectedDBDisabledMgmtItem != "" { + assert.Contains(t, dbAccount.DNSSettings.DisabledManagementGroups, tc.expectedDBDisabledMgmtItem) + } + } + }) + } + } +} diff --git a/management/server/http/testing/integration/events_handler_integration_test.go b/management/server/http/testing/integration/events_handler_integration_test.go new file mode 100644 index 000000000..6611b60ee --- /dev/null +++ b/management/server/http/testing/integration/events_handler_integration_test.go @@ -0,0 +1,105 @@ +//go:build integration + +package integration + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/netbirdio/netbird/management/server/http/testing/testing_tools" + "github.com/netbirdio/netbird/management/server/http/testing/testing_tools/channel" + "github.com/netbirdio/netbird/shared/management/http/api" +) + +func Test_Events_GetAll(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + for _, user := range users { + t.Run(user.name+" - 
Get all events", func(t *testing.T) { + apiHandler, _, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/events.sql", nil, false) + + // First, perform a mutation to generate an event (create a group as admin) + groupBody, err := json.Marshal(&api.GroupRequest{Name: "eventTestGroup"}) + if err != nil { + t.Fatalf("Failed to marshal group request: %v", err) + } + createReq := testing_tools.BuildRequest(t, groupBody, http.MethodPost, "/api/groups", testing_tools.TestAdminId) + createRecorder := httptest.NewRecorder() + apiHandler.ServeHTTP(createRecorder, createReq) + assert.Equal(t, http.StatusOK, createRecorder.Code, "Failed to create group to generate event") + + // Now query events + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/events", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, http.StatusOK, user.expectResponse) + if !expectResponse { + return + } + + got := []api.Event{} + if err := json.Unmarshal(content, &got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + + assert.GreaterOrEqual(t, len(got), 1, "Expected at least one event after creating a group") + + // Verify the group creation event exists + found := false + for _, event := range got { + if event.ActivityCode == "group.add" { + found = true + assert.Equal(t, testing_tools.TestAdminId, event.InitiatorId) + assert.Equal(t, "Group created", event.Activity) + break + } + } + assert.True(t, found, "Expected to find a group.add event") + }) + } +} + +func Test_Events_GetAll_Empty(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/events.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/events", testing_tools.TestAdminId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := 
testing_tools.ReadResponse(t, recorder, http.StatusOK, true) + if !expectResponse { + return + } + + got := []api.Event{} + if err := json.Unmarshal(content, &got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + + assert.Equal(t, 0, len(got), "Expected empty events list when no mutations have been performed") + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } +} diff --git a/management/server/http/testing/integration/groups_handler_integration_test.go b/management/server/http/testing/integration/groups_handler_integration_test.go new file mode 100644 index 000000000..edb43f3f3 --- /dev/null +++ b/management/server/http/testing/integration/groups_handler_integration_test.go @@ -0,0 +1,382 @@ +//go:build integration + +package integration + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/netbirdio/netbird/management/server/http/testing/testing_tools" + "github.com/netbirdio/netbird/management/server/http/testing/testing_tools/channel" + "github.com/netbirdio/netbird/shared/management/http/api" +) + +func Test_Groups_GetAll(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, true}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + for _, user := range users { + t.Run(user.name+" - Get all groups", func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/groups.sql", nil, true) 
+ + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/groups", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, http.StatusOK, user.expectResponse) + if !expectResponse { + return + } + + got := []api.Group{} + if err := json.Unmarshal(content, &got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + + assert.GreaterOrEqual(t, len(got), 2) + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } +} + +func Test_Groups_GetById(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, true}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + groupId string + expectedStatus int + expectGroup bool + }{ + { + name: "Get existing group", + groupId: testing_tools.TestGroupId, + expectedStatus: http.StatusOK, + expectGroup: true, + }, + { + name: "Get non-existing group", + groupId: "nonExistingGroupId", + expectedStatus: http.StatusNotFound, + expectGroup: false, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/groups.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, strings.Replace("/api/groups/{groupId}", "{groupId}", tc.groupId, 1), user.userId) + recorder := httptest.NewRecorder() + 
apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.expectGroup { + got := &api.Group{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + assert.Equal(t, tc.groupId, got.Id) + assert.Equal(t, "testGroupName", got.Name) + assert.Equal(t, 1, got.PeersCount) + } + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } + } +} + +func Test_Groups_Create(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + requestBody *api.GroupRequest + expectedStatus int + verifyResponse func(t *testing.T, group *api.Group) + }{ + { + name: "Create group with valid name", + requestBody: &api.GroupRequest{ + Name: "brandNewGroup", + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, group *api.Group) { + t.Helper() + assert.NotEmpty(t, group.Id) + assert.Equal(t, "brandNewGroup", group.Name) + assert.Equal(t, 0, group.PeersCount) + }, + }, + { + name: "Create group with peers", + requestBody: &api.GroupRequest{ + Name: "groupWithPeers", + Peers: &[]string{testing_tools.TestPeerId}, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, group *api.Group) { + t.Helper() + assert.NotEmpty(t, group.Id) + assert.Equal(t, 
"groupWithPeers", group.Name) + assert.Equal(t, 1, group.PeersCount) + }, + }, + { + name: "Create group with empty name", + requestBody: &api.GroupRequest{ + Name: "", + }, + expectedStatus: http.StatusUnprocessableEntity, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/groups.sql", nil, false) + + body, err := json.Marshal(tc.requestBody) + if err != nil { + t.Fatalf("Failed to marshal request body: %v", err) + } + + req := testing_tools.BuildRequest(t, body, http.MethodPost, "/api/groups", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.Group{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + + // Verify group exists in DB + db := testing_tools.GetDB(t, am.GetStore()) + dbGroup := testing_tools.VerifyGroupInDB(t, db, got.Id) + assert.Equal(t, tc.requestBody.Name, dbGroup.Name) + } + }) + } + } +} + +func Test_Groups_Update(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + groupId string + requestBody *api.GroupRequest + expectedStatus int + 
verifyResponse func(t *testing.T, group *api.Group) + }{ + { + name: "Update group name", + groupId: testing_tools.TestGroupId, + requestBody: &api.GroupRequest{ + Name: "updatedGroupName", + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, group *api.Group) { + t.Helper() + assert.Equal(t, testing_tools.TestGroupId, group.Id) + assert.Equal(t, "updatedGroupName", group.Name) + }, + }, + { + name: "Update group peers", + groupId: testing_tools.TestGroupId, + requestBody: &api.GroupRequest{ + Name: "testGroupName", + Peers: &[]string{}, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, group *api.Group) { + t.Helper() + assert.Equal(t, 0, group.PeersCount) + }, + }, + { + name: "Update with empty name", + groupId: testing_tools.TestGroupId, + requestBody: &api.GroupRequest{ + Name: "", + }, + expectedStatus: http.StatusUnprocessableEntity, + }, + { + name: "Update non-existing group", + groupId: "nonExistingGroupId", + requestBody: &api.GroupRequest{ + Name: "someName", + }, + expectedStatus: http.StatusNotFound, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/groups.sql", nil, false) + + body, err := json.Marshal(tc.requestBody) + if err != nil { + t.Fatalf("Failed to marshal request body: %v", err) + } + + req := testing_tools.BuildRequest(t, body, http.MethodPut, strings.Replace("/api/groups/{groupId}", "{groupId}", tc.groupId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.Group{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + 
+ // Verify updated group in DB + db := testing_tools.GetDB(t, am.GetStore()) + dbGroup := testing_tools.VerifyGroupInDB(t, db, tc.groupId) + assert.Equal(t, tc.requestBody.Name, dbGroup.Name) + } + }) + } + } +} + +func Test_Groups_Delete(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + groupId string + expectedStatus int + }{ + { + name: "Delete existing group not in use", + groupId: testing_tools.NewGroupId, + expectedStatus: http.StatusOK, + }, + { + name: "Delete non-existing group", + groupId: "nonExistingGroupId", + expectedStatus: http.StatusBadRequest, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/groups.sql", nil, false) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodDelete, strings.Replace("/api/groups/{groupId}", "{groupId}", tc.groupId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + _, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if expectResponse && tc.expectedStatus == http.StatusOK { + db := testing_tools.GetDB(t, am.GetStore()) + testing_tools.VerifyGroupNotInDB(t, db, tc.groupId) + } + }) + } + } +} diff --git a/management/server/http/testing/integration/networks_handler_integration_test.go 
b/management/server/http/testing/integration/networks_handler_integration_test.go new file mode 100644 index 000000000..4cb6b268b --- /dev/null +++ b/management/server/http/testing/integration/networks_handler_integration_test.go @@ -0,0 +1,1434 @@ +//go:build integration + +package integration + +import ( + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/management/server/http/testing/testing_tools" + "github.com/netbirdio/netbird/management/server/http/testing/testing_tools/channel" + "github.com/netbirdio/netbird/shared/management/http/api" +) + +func Test_Networks_GetAll(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + for _, user := range users { + t.Run(user.name+" - Get all networks", func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/networks.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/networks", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, http.StatusOK, user.expectResponse) + if !expectResponse { + return + } + + got := []*api.Network{} + if err := json.Unmarshal(content, &got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + + assert.Equal(t, 1, len(got)) + 
assert.Equal(t, "testNetworkId", got[0].Id) + assert.Equal(t, "testNetwork", got[0].Name) + assert.Equal(t, "test network description", *got[0].Description) + assert.GreaterOrEqual(t, len(got[0].Routers), 1) + assert.GreaterOrEqual(t, len(got[0].Resources), 1) + assert.GreaterOrEqual(t, got[0].RoutingPeersCount, 1) + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } +} + +func Test_Networks_GetById(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + networkId string + expectedStatus int + expectNetwork bool + }{ + { + name: "Get existing network", + networkId: "testNetworkId", + expectedStatus: http.StatusOK, + expectNetwork: true, + }, + { + name: "Get non-existing network", + networkId: "nonExistingNetworkId", + expectedStatus: http.StatusNotFound, + expectNetwork: false, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/networks.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, strings.Replace("/api/networks/{networkId}", "{networkId}", tc.networkId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + 
return + } + + if tc.expectNetwork { + got := &api.Network{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + assert.Equal(t, tc.networkId, got.Id) + assert.Equal(t, "testNetwork", got.Name) + assert.Equal(t, "test network description", *got.Description) + assert.GreaterOrEqual(t, len(got.Routers), 1) + assert.GreaterOrEqual(t, len(got.Resources), 1) + } + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } + } +} + +func Test_Networks_Create(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + desc := "new network description" + + tt := []struct { + name string + requestBody *api.NetworkRequest + expectedStatus int + verifyResponse func(t *testing.T, network *api.Network) + }{ + { + name: "Create network with name and description", + requestBody: &api.NetworkRequest{ + Name: "newNetwork", + Description: &desc, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, network *api.Network) { + t.Helper() + assert.NotEmpty(t, network.Id) + assert.Equal(t, "newNetwork", network.Name) + assert.Equal(t, "new network description", *network.Description) + assert.Empty(t, network.Routers) + assert.Empty(t, network.Resources) + assert.Equal(t, 0, network.RoutingPeersCount) + }, + }, + { + name: "Create network with name only", + requestBody: &api.NetworkRequest{ + Name: "simpleNetwork", + }, + 
expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, network *api.Network) { + t.Helper() + assert.NotEmpty(t, network.Id) + assert.Equal(t, "simpleNetwork", network.Name) + }, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, _, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/networks.sql", nil, false) + + body, err := json.Marshal(tc.requestBody) + require.NoError(t, err) + + req := testing_tools.BuildRequest(t, body, http.MethodPost, "/api/networks", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.Network{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + } + }) + } + } +} + +func Test_Networks_Update(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + updatedDesc := "updated description" + + tt := []struct { + name string + networkId string + requestBody *api.NetworkRequest + expectedStatus int + verifyResponse func(t *testing.T, network *api.Network) + }{ + { + name: "Update network name", + networkId: "testNetworkId", + requestBody: &api.NetworkRequest{ + Name: "updatedNetwork", + Description: &updatedDesc, + }, + 
expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, network *api.Network) { + t.Helper() + assert.Equal(t, "testNetworkId", network.Id) + assert.Equal(t, "updatedNetwork", network.Name) + assert.Equal(t, "updated description", *network.Description) + }, + }, + { + name: "Update non-existing network", + networkId: "nonExistingNetworkId", + requestBody: &api.NetworkRequest{ + Name: "whatever", + }, + expectedStatus: http.StatusNotFound, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, _, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/networks.sql", nil, false) + + body, err := json.Marshal(tc.requestBody) + require.NoError(t, err) + + req := testing_tools.BuildRequest(t, body, http.MethodPut, strings.Replace("/api/networks/{networkId}", "{networkId}", tc.networkId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.Network{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + } + }) + } + } +} + +func Test_Networks_Delete(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + networkId string + 
expectedStatus int + }{ + { + name: "Delete existing network", + networkId: "testNetworkId", + expectedStatus: http.StatusOK, + }, + { + name: "Delete non-existing network", + networkId: "nonExistingNetworkId", + expectedStatus: http.StatusNotFound, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, _, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/networks.sql", nil, false) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodDelete, strings.Replace("/api/networks/{networkId}", "{networkId}", tc.networkId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + }) + } + } +} + +func Test_Networks_Delete_Cascades(t *testing.T) { + apiHandler, _, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/networks.sql", nil, false) + + // Delete the network + req := testing_tools.BuildRequest(t, []byte{}, http.MethodDelete, "/api/networks/testNetworkId", testing_tools.TestAdminId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + testing_tools.ReadResponse(t, recorder, http.StatusOK, true) + + // Verify network is gone + req = testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/networks/testNetworkId", testing_tools.TestAdminId) + recorder = httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + testing_tools.ReadResponse(t, recorder, http.StatusNotFound, true) + + // Verify routers in that network are gone + req = testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/networks/testNetworkId/routers", testing_tools.TestAdminId) + recorder = httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + content, _ := testing_tools.ReadResponse(t, recorder, http.StatusOK, true) + var routers []*api.NetworkRouter + require.NoError(t, json.Unmarshal(content, &routers)) + assert.Empty(t, routers) + + 
// Verify resources in that network are gone + req = testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/networks/testNetworkId/resources", testing_tools.TestAdminId) + recorder = httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + content, _ = testing_tools.ReadResponse(t, recorder, http.StatusOK, true) + var resources []*api.NetworkResource + require.NoError(t, json.Unmarshal(content, &resources)) + assert.Empty(t, resources) +} + +func Test_NetworkResources_GetAllInNetwork(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + for _, user := range users { + t.Run(user.name+" - Get all resources in network", func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/networks.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/networks/testNetworkId/resources", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, http.StatusOK, user.expectResponse) + if !expectResponse { + return + } + + got := []*api.NetworkResource{} + if err := json.Unmarshal(content, &got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + + assert.Equal(t, 1, len(got)) + assert.Equal(t, "testResourceId", got[0].Id) + assert.Equal(t, "testResource", got[0].Name) + assert.Equal(t, api.NetworkResourceType("host"), got[0].Type) + + select { + case <-done: + case 
<-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } +} + +func Test_NetworkResources_GetAllInAccount(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + for _, user := range users { + t.Run(user.name+" - Get all resources in account", func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/networks.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/networks/resources", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, http.StatusOK, user.expectResponse) + if !expectResponse { + return + } + + got := []*api.NetworkResource{} + if err := json.Unmarshal(content, &got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + + assert.GreaterOrEqual(t, len(got), 1) + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } +} + +func Test_NetworkResources_GetById(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, 
+ {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + networkId string + resourceId string + expectedStatus int + expectResource bool + }{ + { + name: "Get existing resource", + networkId: "testNetworkId", + resourceId: "testResourceId", + expectedStatus: http.StatusOK, + expectResource: true, + }, + { + name: "Get non-existing resource", + networkId: "testNetworkId", + resourceId: "nonExistingResourceId", + expectedStatus: http.StatusNotFound, + expectResource: false, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/networks.sql", nil, true) + + path := fmt.Sprintf("/api/networks/%s/resources/%s", tc.networkId, tc.resourceId) + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, path, user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.expectResource { + got := &api.NetworkResource{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + assert.Equal(t, tc.resourceId, got.Id) + assert.Equal(t, "testResource", got.Name) + assert.Equal(t, api.NetworkResourceType("host"), got.Type) + assert.Equal(t, "3.3.3.3/32", got.Address) + } + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } + } +} + +func Test_NetworkResources_Create(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, 
+ {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + desc := "new resource" + + tt := []struct { + name string + networkId string + requestBody *api.NetworkResourceRequest + expectedStatus int + verifyResponse func(t *testing.T, resource *api.NetworkResource) + }{ + { + name: "Create host resource with IP", + networkId: "testNetworkId", + requestBody: &api.NetworkResourceRequest{ + Name: "hostResource", + Description: &desc, + Address: "1.1.1.1", + Groups: []string{testing_tools.TestGroupId}, + Enabled: true, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, resource *api.NetworkResource) { + t.Helper() + assert.NotEmpty(t, resource.Id) + assert.Equal(t, "hostResource", resource.Name) + assert.Equal(t, api.NetworkResourceType("host"), resource.Type) + assert.Equal(t, "1.1.1.1/32", resource.Address) + assert.True(t, resource.Enabled) + }, + }, + { + name: "Create host resource with CIDR /32", + networkId: "testNetworkId", + requestBody: &api.NetworkResourceRequest{ + Name: "hostCIDR", + Address: "10.0.0.1/32", + Groups: []string{testing_tools.TestGroupId}, + Enabled: true, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, resource *api.NetworkResource) { + t.Helper() + assert.Equal(t, api.NetworkResourceType("host"), resource.Type) + assert.Equal(t, "10.0.0.1/32", resource.Address) + }, + }, + { + name: "Create subnet resource", + networkId: "testNetworkId", + requestBody: &api.NetworkResourceRequest{ + Name: "subnetResource", + Address: "192.168.0.0/24", + Groups: []string{testing_tools.TestGroupId}, + Enabled: true, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, resource 
*api.NetworkResource) { + t.Helper() + assert.Equal(t, api.NetworkResourceType("subnet"), resource.Type) + assert.Equal(t, "192.168.0.0/24", resource.Address) + }, + }, + { + name: "Create domain resource", + networkId: "testNetworkId", + requestBody: &api.NetworkResourceRequest{ + Name: "domainResource", + Address: "example.com", + Groups: []string{testing_tools.TestGroupId}, + Enabled: true, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, resource *api.NetworkResource) { + t.Helper() + assert.Equal(t, api.NetworkResourceType("domain"), resource.Type) + assert.Equal(t, "example.com", resource.Address) + }, + }, + { + name: "Create wildcard domain resource", + networkId: "testNetworkId", + requestBody: &api.NetworkResourceRequest{ + Name: "wildcardDomain", + Address: "*.example.com", + Groups: []string{testing_tools.TestGroupId}, + Enabled: true, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, resource *api.NetworkResource) { + t.Helper() + assert.Equal(t, api.NetworkResourceType("domain"), resource.Type) + assert.Equal(t, "*.example.com", resource.Address) + }, + }, + { + name: "Create disabled resource", + networkId: "testNetworkId", + requestBody: &api.NetworkResourceRequest{ + Name: "disabledResource", + Address: "5.5.5.5", + Groups: []string{testing_tools.TestGroupId}, + Enabled: false, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, resource *api.NetworkResource) { + t.Helper() + assert.False(t, resource.Enabled) + }, + }, + { + name: "Create resource with invalid address", + networkId: "testNetworkId", + requestBody: &api.NetworkResourceRequest{ + Name: "invalidResource", + Address: "not-a-valid-address!!!", + Groups: []string{testing_tools.TestGroupId}, + Enabled: true, + }, + expectedStatus: http.StatusInternalServerError, + }, + { + name: "Create resource with empty groups", + networkId: "testNetworkId", + requestBody: &api.NetworkResourceRequest{ + Name: "noGroupsResource", 
+ Address: "7.7.7.7", + Groups: []string{}, + Enabled: true, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, resource *api.NetworkResource) { + t.Helper() + assert.NotEmpty(t, resource.Id) + }, + }, + { + name: "Create resource with duplicate name", + networkId: "testNetworkId", + requestBody: &api.NetworkResourceRequest{ + Name: "testResource", + Address: "8.8.8.8", + Groups: []string{testing_tools.TestGroupId}, + Enabled: true, + }, + expectedStatus: http.StatusUnprocessableEntity, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, _, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/networks.sql", nil, false) + + body, err := json.Marshal(tc.requestBody) + require.NoError(t, err) + + path := fmt.Sprintf("/api/networks/%s/resources", tc.networkId) + req := testing_tools.BuildRequest(t, body, http.MethodPost, path, user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.NetworkResource{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + } + }) + } + } +} + +func Test_NetworkResources_Update(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, 
false}, + } + + updatedDesc := "updated resource" + + tt := []struct { + name string + networkId string + resourceId string + requestBody *api.NetworkResourceRequest + expectedStatus int + verifyResponse func(t *testing.T, resource *api.NetworkResource) + }{ + { + name: "Update resource name and address", + networkId: "testNetworkId", + resourceId: "testResourceId", + requestBody: &api.NetworkResourceRequest{ + Name: "updatedResource", + Description: &updatedDesc, + Address: "4.4.4.4", + Groups: []string{testing_tools.TestGroupId}, + Enabled: true, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, resource *api.NetworkResource) { + t.Helper() + assert.Equal(t, "testResourceId", resource.Id) + assert.Equal(t, "updatedResource", resource.Name) + assert.Equal(t, "updated resource", *resource.Description) + assert.Equal(t, "4.4.4.4/32", resource.Address) + }, + }, + { + name: "Update resource to subnet type", + networkId: "testNetworkId", + resourceId: "testResourceId", + requestBody: &api.NetworkResourceRequest{ + Name: "testResource", + Address: "10.0.0.0/16", + Groups: []string{testing_tools.TestGroupId}, + Enabled: true, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, resource *api.NetworkResource) { + t.Helper() + assert.Equal(t, api.NetworkResourceType("subnet"), resource.Type) + assert.Equal(t, "10.0.0.0/16", resource.Address) + }, + }, + { + name: "Update resource to domain type", + networkId: "testNetworkId", + resourceId: "testResourceId", + requestBody: &api.NetworkResourceRequest{ + Name: "testResource", + Address: "myservice.example.com", + Groups: []string{testing_tools.TestGroupId}, + Enabled: true, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, resource *api.NetworkResource) { + t.Helper() + assert.Equal(t, api.NetworkResourceType("domain"), resource.Type) + assert.Equal(t, "myservice.example.com", resource.Address) + }, + }, + { + name: "Update non-existing resource", + 
+			networkId:  "testNetworkId",
+			resourceId: "nonExistingResourceId",
+			requestBody: &api.NetworkResourceRequest{
+				Name:    "whatever",
+				Address: "1.2.3.4",
+				Groups:  []string{testing_tools.TestGroupId},
+				Enabled: true,
+			},
+			expectedStatus: http.StatusNotFound,
+		},
+	}
+
+	for _, tc := range tt {
+		for _, user := range users {
+			t.Run(user.name+" - "+tc.name, func(t *testing.T) {
+				apiHandler, _, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/networks.sql", nil, false)
+
+				body, err := json.Marshal(tc.requestBody)
+				require.NoError(t, err)
+
+				path := fmt.Sprintf("/api/networks/%s/resources/%s", tc.networkId, tc.resourceId)
+				req := testing_tools.BuildRequest(t, body, http.MethodPut, path, user.userId)
+				recorder := httptest.NewRecorder()
+				apiHandler.ServeHTTP(recorder, req)
+
+				content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse)
+				if !expectResponse {
+					return
+				}
+
+				if tc.verifyResponse != nil {
+					got := &api.NetworkResource{}
+					if err := json.Unmarshal(content, got); err != nil {
+						t.Fatalf("Sent content is not in correct json format; %v", err)
+					}
+					tc.verifyResponse(t, got)
+				}
+			})
+		}
+	}
+}
+
+func Test_NetworkResources_Delete(t *testing.T) {
+	users := []struct {
+		name           string
+		userId         string
+		expectResponse bool
+	}{
+		{"Regular user", testing_tools.TestUserId, false},
+		{"Admin user", testing_tools.TestAdminId, true},
+		{"Owner user", testing_tools.TestOwnerId, true},
+		{"Regular service user", testing_tools.TestServiceUserId, false},
+		{"Admin service user", testing_tools.TestServiceAdminId, true},
+		{"Blocked user", testing_tools.BlockedUserId, false},
+		{"Other user", testing_tools.OtherUserId, false},
+		{"Invalid token", testing_tools.InvalidToken, false},
+	}
+
+	tt := []struct {
+		name           string
+		networkId      string
+		resourceId     string
+		expectedStatus int
+	}{
+		{
+			name:           "Delete existing resource",
+			networkId:      "testNetworkId",
+			resourceId:     "testResourceId",
+			expectedStatus: http.StatusOK,
+		},
+		{
+			name:           "Delete non-existing resource",
+			networkId:      "testNetworkId",
+			resourceId:     "nonExistingResourceId",
+			expectedStatus: http.StatusNotFound,
+		},
+	}
+
+	for _, tc := range tt {
+		for _, user := range users {
+			t.Run(user.name+" - "+tc.name, func(t *testing.T) {
+				apiHandler, _, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/networks.sql", nil, false)
+
+				path := fmt.Sprintf("/api/networks/%s/resources/%s", tc.networkId, tc.resourceId)
+				req := testing_tools.BuildRequest(t, []byte{}, http.MethodDelete, path, user.userId)
+				recorder := httptest.NewRecorder()
+				apiHandler.ServeHTTP(recorder, req)
+
+				testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse)
+			})
+		}
+	}
+}
+
+func Test_NetworkRouters_GetAllInNetwork(t *testing.T) {
+	users := []struct {
+		name           string
+		userId         string
+		expectResponse bool
+	}{
+		{"Regular user", testing_tools.TestUserId, false},
+		{"Admin user", testing_tools.TestAdminId, true},
+		{"Owner user", testing_tools.TestOwnerId, true},
+		{"Regular service user", testing_tools.TestServiceUserId, false},
+		{"Admin service user", testing_tools.TestServiceAdminId, true},
+		{"Blocked user", testing_tools.BlockedUserId, false},
+		{"Other user", testing_tools.OtherUserId, false},
+		{"Invalid token", testing_tools.InvalidToken, false},
+	}
+
+	for _, user := range users {
+		t.Run(user.name+" - Get all routers in network", func(t *testing.T) {
+			apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/networks.sql", nil, true)
+
+			req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/networks/testNetworkId/routers", user.userId)
+			recorder := httptest.NewRecorder()
+			apiHandler.ServeHTTP(recorder, req)
+
+			content, expectResponse := testing_tools.ReadResponse(t, recorder, http.StatusOK, user.expectResponse)
+			if !expectResponse {
+				return
+			}
+
+			got := []*api.NetworkRouter{}
+			if err := json.Unmarshal(content, &got); err != nil {
+				t.Fatalf("Sent content
is not in correct json format; %v", err) + } + + assert.Equal(t, 1, len(got)) + assert.Equal(t, "testRouterId", got[0].Id) + assert.Equal(t, "testPeerId", *got[0].Peer) + assert.True(t, got[0].Masquerade) + assert.Equal(t, 100, got[0].Metric) + assert.True(t, got[0].Enabled) + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } +} + +func Test_NetworkRouters_GetAllInAccount(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + for _, user := range users { + t.Run(user.name+" - Get all routers in account", func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/networks.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/networks/routers", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, http.StatusOK, user.expectResponse) + if !expectResponse { + return + } + + got := []*api.NetworkRouter{} + if err := json.Unmarshal(content, &got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + + assert.GreaterOrEqual(t, len(got), 1) + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } +} + +func Test_NetworkRouters_GetById(t *testing.T) { + users := []struct { + name string + userId string + 
expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + networkId string + routerId string + expectedStatus int + expectRouter bool + }{ + { + name: "Get existing router", + networkId: "testNetworkId", + routerId: "testRouterId", + expectedStatus: http.StatusOK, + expectRouter: true, + }, + { + name: "Get non-existing router", + networkId: "testNetworkId", + routerId: "nonExistingRouterId", + expectedStatus: http.StatusNotFound, + expectRouter: false, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/networks.sql", nil, true) + + path := fmt.Sprintf("/api/networks/%s/routers/%s", tc.networkId, tc.routerId) + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, path, user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.expectRouter { + got := &api.NetworkRouter{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + assert.Equal(t, tc.routerId, got.Id) + assert.Equal(t, "testPeerId", *got.Peer) + assert.True(t, got.Masquerade) + assert.Equal(t, 100, got.Metric) + assert.True(t, got.Enabled) + } + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for 
peerShouldNotReceiveUpdate") + } + }) + } + } +} + +func Test_NetworkRouters_Create(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + peerID := "testPeerId" + peerGroups := []string{testing_tools.TestGroupId} + + tt := []struct { + name string + networkId string + requestBody *api.NetworkRouterRequest + expectedStatus int + verifyResponse func(t *testing.T, router *api.NetworkRouter) + }{ + { + name: "Create router with peer", + networkId: "testNetworkId", + requestBody: &api.NetworkRouterRequest{ + Peer: &peerID, + Masquerade: true, + Metric: 200, + Enabled: true, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, router *api.NetworkRouter) { + t.Helper() + assert.NotEmpty(t, router.Id) + assert.Equal(t, peerID, *router.Peer) + assert.True(t, router.Masquerade) + assert.Equal(t, 200, router.Metric) + assert.True(t, router.Enabled) + }, + }, + { + name: "Create router with peer groups", + networkId: "testNetworkId", + requestBody: &api.NetworkRouterRequest{ + PeerGroups: &peerGroups, + Masquerade: false, + Metric: 300, + Enabled: true, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, router *api.NetworkRouter) { + t.Helper() + assert.NotEmpty(t, router.Id) + assert.NotNil(t, router.PeerGroups) + assert.Equal(t, 1, len(*router.PeerGroups)) + assert.False(t, router.Masquerade) + assert.Equal(t, 300, router.Metric) + assert.True(t, router.Enabled) // always true on creation + }, + }, + { + name: "Create router with both 
peer and peer_groups", + networkId: "testNetworkId", + requestBody: &api.NetworkRouterRequest{ + Peer: &peerID, + PeerGroups: &peerGroups, + Masquerade: true, + Metric: 100, + Enabled: true, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, router *api.NetworkRouter) { + t.Helper() + assert.NotEmpty(t, router.Id) + assert.Equal(t, peerID, *router.Peer) + assert.Equal(t, 1, len(*router.PeerGroups)) + }, + }, + { + name: "Create router in non-existing network", + networkId: "nonExistingNetworkId", + requestBody: &api.NetworkRouterRequest{ + Peer: &peerID, + Masquerade: true, + Metric: 100, + Enabled: true, + }, + expectedStatus: http.StatusNotFound, + }, + { + name: "Create router enabled is always true", + networkId: "testNetworkId", + requestBody: &api.NetworkRouterRequest{ + Peer: &peerID, + Masquerade: false, + Metric: 50, + Enabled: false, // handler sets to true + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, router *api.NetworkRouter) { + t.Helper() + assert.True(t, router.Enabled) // always true on creation + }, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, _, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/networks.sql", nil, false) + + body, err := json.Marshal(tc.requestBody) + require.NoError(t, err) + + path := fmt.Sprintf("/api/networks/%s/routers", tc.networkId) + req := testing_tools.BuildRequest(t, body, http.MethodPost, path, user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.NetworkRouter{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + } + }) + } + } +} + +func 
Test_NetworkRouters_Update(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + peerID := "testPeerId" + peerGroups := []string{testing_tools.TestGroupId} + + tt := []struct { + name string + networkId string + routerId string + requestBody *api.NetworkRouterRequest + expectedStatus int + verifyResponse func(t *testing.T, router *api.NetworkRouter) + }{ + { + name: "Update router metric and masquerade", + networkId: "testNetworkId", + routerId: "testRouterId", + requestBody: &api.NetworkRouterRequest{ + Peer: &peerID, + Masquerade: false, + Metric: 500, + Enabled: true, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, router *api.NetworkRouter) { + t.Helper() + assert.Equal(t, "testRouterId", router.Id) + assert.False(t, router.Masquerade) + assert.Equal(t, 500, router.Metric) + }, + }, + { + name: "Update router to use peer groups", + networkId: "testNetworkId", + routerId: "testRouterId", + requestBody: &api.NetworkRouterRequest{ + PeerGroups: &peerGroups, + Masquerade: true, + Metric: 100, + Enabled: true, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, router *api.NetworkRouter) { + t.Helper() + assert.NotNil(t, router.PeerGroups) + assert.Equal(t, 1, len(*router.PeerGroups)) + }, + }, + { + name: "Update router disabled", + networkId: "testNetworkId", + routerId: "testRouterId", + requestBody: &api.NetworkRouterRequest{ + Peer: &peerID, + Masquerade: true, + Metric: 100, + Enabled: false, + }, + expectedStatus: 
http.StatusOK, + verifyResponse: func(t *testing.T, router *api.NetworkRouter) { + t.Helper() + assert.False(t, router.Enabled) + }, + }, + { + name: "Update non-existing router creates it", + networkId: "testNetworkId", + routerId: "nonExistingRouterId", + requestBody: &api.NetworkRouterRequest{ + Peer: &peerID, + Masquerade: true, + Metric: 100, + Enabled: true, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, router *api.NetworkRouter) { + t.Helper() + assert.Equal(t, "nonExistingRouterId", router.Id) + }, + }, + { + name: "Update router with both peer and peer_groups", + networkId: "testNetworkId", + routerId: "testRouterId", + requestBody: &api.NetworkRouterRequest{ + Peer: &peerID, + PeerGroups: &peerGroups, + Masquerade: true, + Metric: 100, + Enabled: true, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, router *api.NetworkRouter) { + t.Helper() + assert.Equal(t, "testRouterId", router.Id) + assert.Equal(t, peerID, *router.Peer) + assert.Equal(t, 1, len(*router.PeerGroups)) + }, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, _, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/networks.sql", nil, false) + + body, err := json.Marshal(tc.requestBody) + require.NoError(t, err) + + path := fmt.Sprintf("/api/networks/%s/routers/%s", tc.networkId, tc.routerId) + req := testing_tools.BuildRequest(t, body, http.MethodPut, path, user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.NetworkRouter{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + } + }) + } + } +} + +func 
Test_NetworkRouters_Delete(t *testing.T) {
+	users := []struct {
+		name           string
+		userId         string
+		expectResponse bool
+	}{
+		{"Regular user", testing_tools.TestUserId, false},
+		{"Admin user", testing_tools.TestAdminId, true},
+		{"Owner user", testing_tools.TestOwnerId, true},
+		{"Regular service user", testing_tools.TestServiceUserId, false},
+		{"Admin service user", testing_tools.TestServiceAdminId, true},
+		{"Blocked user", testing_tools.BlockedUserId, false},
+		{"Other user", testing_tools.OtherUserId, false},
+		{"Invalid token", testing_tools.InvalidToken, false},
+	}
+
+	tt := []struct {
+		name           string
+		networkId      string
+		routerId       string
+		expectedStatus int
+	}{
+		{
+			name:           "Delete existing router",
+			networkId:      "testNetworkId",
+			routerId:       "testRouterId",
+			expectedStatus: http.StatusOK,
+		},
+		{
+			name:           "Delete non-existing router",
+			networkId:      "testNetworkId",
+			routerId:       "nonExistingRouterId",
+			expectedStatus: http.StatusNotFound,
+		},
+	}
+
+	for _, tc := range tt {
+		for _, user := range users {
+			t.Run(user.name+" - "+tc.name, func(t *testing.T) {
+				apiHandler, _, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/networks.sql", nil, false)
+
+				path := fmt.Sprintf("/api/networks/%s/routers/%s", tc.networkId, tc.routerId)
+				req := testing_tools.BuildRequest(t, []byte{}, http.MethodDelete, path, user.userId)
+				recorder := httptest.NewRecorder()
+				apiHandler.ServeHTTP(recorder, req)
+
+				testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse)
+			})
+		}
+	}
+}
diff --git a/management/server/http/testing/integration/peers_handler_integration_test.go b/management/server/http/testing/integration/peers_handler_integration_test.go
new file mode 100644
index 000000000..17a9e94a6
--- /dev/null
+++ b/management/server/http/testing/integration/peers_handler_integration_test.go
@@ -0,0 +1,605 @@
+//go:build integration
+
+package integration
+
+import (
+	"encoding/json"
+	"net/http"
+	"net/http/httptest"
+	"strings"
+	"testing"
+
"time" + + "github.com/stretchr/testify/assert" + + "github.com/netbirdio/netbird/management/server/http/testing/testing_tools" + "github.com/netbirdio/netbird/management/server/http/testing/testing_tools/channel" + "github.com/netbirdio/netbird/shared/management/http/api" +) + +const ( + testPeerId2 = "testPeerId2" +) + +func Test_Peers_GetAll(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + { + name: "Regular user", + userId: testing_tools.TestUserId, + expectResponse: false, + }, + { + name: "Admin user", + userId: testing_tools.TestAdminId, + expectResponse: true, + }, + { + name: "Owner user", + userId: testing_tools.TestOwnerId, + expectResponse: true, + }, + { + name: "Regular service user", + userId: testing_tools.TestServiceUserId, + expectResponse: true, + }, + { + name: "Admin service user", + userId: testing_tools.TestServiceAdminId, + expectResponse: true, + }, + { + name: "Blocked user", + userId: testing_tools.BlockedUserId, + expectResponse: false, + }, + { + name: "Other user", + userId: testing_tools.OtherUserId, + expectResponse: false, + }, + { + name: "Invalid token", + userId: testing_tools.InvalidToken, + expectResponse: false, + }, + } + + for _, user := range users { + t.Run(user.name+" - Get all peers", func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/peers_integration.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/peers", user.userId) + recorder := httptest.NewRecorder() + + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, http.StatusOK, user.expectResponse) + if !expectResponse { + return + } + + var got []api.PeerBatch + if err := json.Unmarshal(content, &got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + + assert.GreaterOrEqual(t, len(got), 2, "Expected at least 2 peers") + + select { + case <-done: + 
case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } +} + +func Test_Peers_GetById(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + { + name: "Regular user", + userId: testing_tools.TestUserId, + expectResponse: false, + }, + { + name: "Admin user", + userId: testing_tools.TestAdminId, + expectResponse: true, + }, + { + name: "Owner user", + userId: testing_tools.TestOwnerId, + expectResponse: true, + }, + { + name: "Regular service user", + userId: testing_tools.TestServiceUserId, + expectResponse: true, + }, + { + name: "Admin service user", + userId: testing_tools.TestServiceAdminId, + expectResponse: true, + }, + { + name: "Blocked user", + userId: testing_tools.BlockedUserId, + expectResponse: false, + }, + { + name: "Other user", + userId: testing_tools.OtherUserId, + expectResponse: false, + }, + { + name: "Invalid token", + userId: testing_tools.InvalidToken, + expectResponse: false, + }, + } + + tt := []struct { + name string + expectedStatus int + requestType string + requestPath string + requestId string + verifyResponse func(t *testing.T, peer *api.Peer) + }{ + { + name: "Get existing peer", + requestType: http.MethodGet, + requestPath: "/api/peers/{peerId}", + requestId: testing_tools.TestPeerId, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, peer *api.Peer) { + t.Helper() + assert.Equal(t, testing_tools.TestPeerId, peer.Id) + assert.Equal(t, "test-peer-1", peer.Name) + assert.Equal(t, "test-host-1", peer.Hostname) + assert.Equal(t, "Debian GNU/Linux ", peer.Os) + assert.Equal(t, "0.12.0", peer.Version) + assert.Equal(t, false, peer.SshEnabled) + assert.Equal(t, true, peer.LoginExpirationEnabled) + }, + }, + { + name: "Get second existing peer", + requestType: http.MethodGet, + requestPath: "/api/peers/{peerId}", + requestId: testPeerId2, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, peer *api.Peer) { + 
t.Helper() + assert.Equal(t, testPeerId2, peer.Id) + assert.Equal(t, "test-peer-2", peer.Name) + assert.Equal(t, "test-host-2", peer.Hostname) + assert.Equal(t, "Ubuntu ", peer.Os) + assert.Equal(t, true, peer.SshEnabled) + assert.Equal(t, false, peer.LoginExpirationEnabled) + assert.Equal(t, true, peer.Connected) + }, + }, + { + name: "Get non-existing peer", + requestType: http.MethodGet, + requestPath: "/api/peers/{peerId}", + requestId: "nonExistingPeerId", + expectedStatus: http.StatusNotFound, + verifyResponse: nil, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/peers_integration.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, tc.requestType, strings.Replace(tc.requestPath, "{peerId}", tc.requestId, 1), user.userId) + recorder := httptest.NewRecorder() + + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.Peer{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + } + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } + } +} + +func Test_Peers_Update(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + { + name: "Regular user", + userId: testing_tools.TestUserId, + expectResponse: false, + }, + { + name: "Admin user", + userId: testing_tools.TestAdminId, + expectResponse: true, + }, + { + name: "Owner user", + userId: testing_tools.TestOwnerId, + expectResponse: true, + }, + { + name: "Regular service user", + userId: testing_tools.TestServiceUserId, + expectResponse: false, + }, + { + 
name: "Admin service user", + userId: testing_tools.TestServiceAdminId, + expectResponse: true, + }, + { + name: "Blocked user", + userId: testing_tools.BlockedUserId, + expectResponse: false, + }, + { + name: "Other user", + userId: testing_tools.OtherUserId, + expectResponse: false, + }, + { + name: "Invalid token", + userId: testing_tools.InvalidToken, + expectResponse: false, + }, + } + + tt := []struct { + name string + expectedStatus int + requestBody *api.PeerRequest + requestType string + requestPath string + requestId string + verifyResponse func(t *testing.T, peer *api.Peer) + }{ + { + name: "Update peer name", + requestType: http.MethodPut, + requestPath: "/api/peers/{peerId}", + requestId: testing_tools.TestPeerId, + requestBody: &api.PeerRequest{ + Name: "updated-peer-name", + SshEnabled: false, + LoginExpirationEnabled: true, + InactivityExpirationEnabled: false, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, peer *api.Peer) { + t.Helper() + assert.Equal(t, testing_tools.TestPeerId, peer.Id) + assert.Equal(t, "updated-peer-name", peer.Name) + assert.Equal(t, false, peer.SshEnabled) + assert.Equal(t, true, peer.LoginExpirationEnabled) + }, + }, + { + name: "Enable SSH on peer", + requestType: http.MethodPut, + requestPath: "/api/peers/{peerId}", + requestId: testing_tools.TestPeerId, + requestBody: &api.PeerRequest{ + Name: "test-peer-1", + SshEnabled: true, + LoginExpirationEnabled: true, + InactivityExpirationEnabled: false, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, peer *api.Peer) { + t.Helper() + assert.Equal(t, testing_tools.TestPeerId, peer.Id) + assert.Equal(t, "test-peer-1", peer.Name) + assert.Equal(t, true, peer.SshEnabled) + assert.Equal(t, true, peer.LoginExpirationEnabled) + }, + }, + { + name: "Disable login expiration on peer", + requestType: http.MethodPut, + requestPath: "/api/peers/{peerId}", + requestId: testing_tools.TestPeerId, + requestBody: &api.PeerRequest{ + Name: 
"test-peer-1", + SshEnabled: false, + LoginExpirationEnabled: false, + InactivityExpirationEnabled: false, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, peer *api.Peer) { + t.Helper() + assert.Equal(t, testing_tools.TestPeerId, peer.Id) + assert.Equal(t, false, peer.LoginExpirationEnabled) + }, + }, + { + name: "Update non-existing peer", + requestType: http.MethodPut, + requestPath: "/api/peers/{peerId}", + requestId: "nonExistingPeerId", + requestBody: &api.PeerRequest{ + Name: "updated-name", + SshEnabled: false, + LoginExpirationEnabled: false, + InactivityExpirationEnabled: false, + }, + expectedStatus: http.StatusNotFound, + verifyResponse: nil, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/peers_integration.sql", nil, false) + + body, err := json.Marshal(tc.requestBody) + if err != nil { + t.Fatalf("Failed to marshal request body: %v", err) + } + + req := testing_tools.BuildRequest(t, body, tc.requestType, strings.Replace(tc.requestPath, "{peerId}", tc.requestId, 1), user.userId) + recorder := httptest.NewRecorder() + + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.Peer{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + + // Verify updated peer in DB + db := testing_tools.GetDB(t, am.GetStore()) + dbPeer := testing_tools.VerifyPeerInDB(t, db, tc.requestId) + assert.Equal(t, tc.requestBody.Name, dbPeer.Name) + assert.Equal(t, tc.requestBody.SshEnabled, dbPeer.SSHEnabled) + assert.Equal(t, tc.requestBody.LoginExpirationEnabled, dbPeer.LoginExpirationEnabled) + assert.Equal(t, 
tc.requestBody.InactivityExpirationEnabled, dbPeer.InactivityExpirationEnabled) + } + }) + } + } +} + +func Test_Peers_Delete(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + { + name: "Regular user", + userId: testing_tools.TestUserId, + expectResponse: false, + }, + { + name: "Admin user", + userId: testing_tools.TestAdminId, + expectResponse: true, + }, + { + name: "Owner user", + userId: testing_tools.TestOwnerId, + expectResponse: true, + }, + { + name: "Regular service user", + userId: testing_tools.TestServiceUserId, + expectResponse: false, + }, + { + name: "Admin service user", + userId: testing_tools.TestServiceAdminId, + expectResponse: true, + }, + { + name: "Blocked user", + userId: testing_tools.BlockedUserId, + expectResponse: false, + }, + { + name: "Other user", + userId: testing_tools.OtherUserId, + expectResponse: false, + }, + { + name: "Invalid token", + userId: testing_tools.InvalidToken, + expectResponse: false, + }, + } + + tt := []struct { + name string + expectedStatus int + requestType string + requestPath string + requestId string + }{ + { + name: "Delete existing peer", + requestType: http.MethodDelete, + requestPath: "/api/peers/{peerId}", + requestId: testPeerId2, + expectedStatus: http.StatusOK, + }, + { + name: "Delete non-existing peer", + requestType: http.MethodDelete, + requestPath: "/api/peers/{peerId}", + requestId: "nonExistingPeerId", + expectedStatus: http.StatusNotFound, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/peers_integration.sql", nil, false) + + req := testing_tools.BuildRequest(t, []byte{}, tc.requestType, strings.Replace(tc.requestPath, "{peerId}", tc.requestId, 1), user.userId) + recorder := httptest.NewRecorder() + + apiHandler.ServeHTTP(recorder, req) + + _, expectResponse := testing_tools.ReadResponse(t, 
recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + // Verify peer is actually deleted in DB + if tc.expectedStatus == http.StatusOK { + db := testing_tools.GetDB(t, am.GetStore()) + testing_tools.VerifyPeerNotInDB(t, db, tc.requestId) + } + }) + } + } +} + +func Test_Peers_GetAccessiblePeers(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + { + name: "Regular user", + userId: testing_tools.TestUserId, + expectResponse: false, + }, + { + name: "Admin user", + userId: testing_tools.TestAdminId, + expectResponse: true, + }, + { + name: "Owner user", + userId: testing_tools.TestOwnerId, + expectResponse: true, + }, + { + name: "Regular service user", + userId: testing_tools.TestServiceUserId, + expectResponse: false, + }, + { + name: "Admin service user", + userId: testing_tools.TestServiceAdminId, + expectResponse: true, + }, + { + name: "Blocked user", + userId: testing_tools.BlockedUserId, + expectResponse: false, + }, + { + name: "Other user", + userId: testing_tools.OtherUserId, + expectResponse: false, + }, + { + name: "Invalid token", + userId: testing_tools.InvalidToken, + expectResponse: false, + }, + } + + tt := []struct { + name string + expectedStatus int + requestType string + requestPath string + requestId string + }{ + { + name: "Get accessible peers for existing peer", + requestType: http.MethodGet, + requestPath: "/api/peers/{peerId}/accessible-peers", + requestId: testing_tools.TestPeerId, + expectedStatus: http.StatusOK, + }, + { + name: "Get accessible peers for non-existing peer", + requestType: http.MethodGet, + requestPath: "/api/peers/{peerId}/accessible-peers", + requestId: "nonExistingPeerId", + expectedStatus: http.StatusOK, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/peers_integration.sql", nil, true) + + 
req := testing_tools.BuildRequest(t, []byte{}, tc.requestType, strings.Replace(tc.requestPath, "{peerId}", tc.requestId, 1), user.userId) + recorder := httptest.NewRecorder() + + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.expectedStatus == http.StatusOK { + var got []api.AccessiblePeer + if err := json.Unmarshal(content, &got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + // The accessible peers list should be a valid array (may be empty if no policies connect peers) + assert.NotNil(t, got, "Expected accessible peers to be a valid array") + } + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } + } +} diff --git a/management/server/http/testing/integration/policies_handler_integration_test.go b/management/server/http/testing/integration/policies_handler_integration_test.go new file mode 100644 index 000000000..6f3624fb5 --- /dev/null +++ b/management/server/http/testing/integration/policies_handler_integration_test.go @@ -0,0 +1,488 @@ +//go:build integration + +package integration + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/netbirdio/netbird/management/server/http/testing/testing_tools" + "github.com/netbirdio/netbird/management/server/http/testing/testing_tools/channel" + "github.com/netbirdio/netbird/shared/management/http/api" +) + +func Test_Policies_GetAll(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin 
service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + for _, user := range users { + t.Run(user.name+" - Get all policies", func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/policies.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/policies", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, http.StatusOK, user.expectResponse) + if !expectResponse { + return + } + + got := []api.Policy{} + if err := json.Unmarshal(content, &got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + + assert.Equal(t, 1, len(got)) + assert.Equal(t, "testPolicy", got[0].Name) + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } +} + +func Test_Policies_GetById(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + policyId string + expectedStatus int + expectPolicy bool + }{ + { + name: "Get existing policy", + policyId: "testPolicyId", + expectedStatus: http.StatusOK, + expectPolicy: true, + }, + { + name: "Get non-existing policy", + policyId: "nonExistingPolicyId", + 
expectedStatus: http.StatusNotFound, + expectPolicy: false, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/policies.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, strings.Replace("/api/policies/{policyId}", "{policyId}", tc.policyId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.expectPolicy { + got := &api.Policy{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + assert.NotNil(t, got.Id) + assert.Equal(t, tc.policyId, *got.Id) + assert.Equal(t, "testPolicy", got.Name) + assert.Equal(t, true, got.Enabled) + assert.GreaterOrEqual(t, len(got.Rules), 1) + } + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } + } +} + +func Test_Policies_Create(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + srcGroups := []string{testing_tools.TestGroupId} + dstGroups := []string{testing_tools.TestGroupId} + + tt := []struct { + name string + requestBody *api.PolicyCreate + expectedStatus int + verifyResponse func(t *testing.T, policy 
*api.Policy) + }{ + { + name: "Create policy with accept rule", + requestBody: &api.PolicyCreate{ + Name: "newPolicy", + Enabled: true, + Rules: []api.PolicyRuleUpdate{ + { + Name: "allowAll", + Enabled: true, + Action: "accept", + Protocol: "all", + Bidirectional: true, + Sources: &srcGroups, + Destinations: &dstGroups, + }, + }, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, policy *api.Policy) { + t.Helper() + assert.NotNil(t, policy.Id) + assert.Equal(t, "newPolicy", policy.Name) + assert.Equal(t, true, policy.Enabled) + assert.Equal(t, 1, len(policy.Rules)) + assert.Equal(t, "allowAll", policy.Rules[0].Name) + }, + }, + { + name: "Create policy with drop rule", + requestBody: &api.PolicyCreate{ + Name: "dropPolicy", + Enabled: true, + Rules: []api.PolicyRuleUpdate{ + { + Name: "dropAll", + Enabled: true, + Action: "drop", + Protocol: "all", + Bidirectional: true, + Sources: &srcGroups, + Destinations: &dstGroups, + }, + }, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, policy *api.Policy) { + t.Helper() + assert.Equal(t, "dropPolicy", policy.Name) + }, + }, + { + name: "Create policy with TCP rule and ports", + requestBody: &api.PolicyCreate{ + Name: "tcpPolicy", + Enabled: true, + Rules: []api.PolicyRuleUpdate{ + { + Name: "tcpRule", + Enabled: true, + Action: "accept", + Protocol: "tcp", + Bidirectional: true, + Sources: &srcGroups, + Destinations: &dstGroups, + Ports: &[]string{"80", "443"}, + }, + }, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, policy *api.Policy) { + t.Helper() + assert.Equal(t, "tcpPolicy", policy.Name) + assert.NotNil(t, policy.Rules[0].Ports) + assert.Equal(t, 2, len(*policy.Rules[0].Ports)) + }, + }, + { + name: "Create policy with empty name", + requestBody: &api.PolicyCreate{ + Name: "", + Enabled: true, + Rules: []api.PolicyRuleUpdate{ + { + Name: "rule", + Enabled: true, + Action: "accept", + Protocol: "all", + Sources: &srcGroups, + 
Destinations: &dstGroups, + }, + }, + }, + expectedStatus: http.StatusUnprocessableEntity, + }, + { + name: "Create policy with no rules", + requestBody: &api.PolicyCreate{ + Name: "noRulesPolicy", + Enabled: true, + Rules: []api.PolicyRuleUpdate{}, + }, + expectedStatus: http.StatusUnprocessableEntity, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/policies.sql", nil, false) + + body, err := json.Marshal(tc.requestBody) + if err != nil { + t.Fatalf("Failed to marshal request body: %v", err) + } + + req := testing_tools.BuildRequest(t, body, http.MethodPost, "/api/policies", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.Policy{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + + // Verify policy exists in DB with correct fields + db := testing_tools.GetDB(t, am.GetStore()) + dbPolicy := testing_tools.VerifyPolicyInDB(t, db, *got.Id) + assert.Equal(t, tc.requestBody.Name, dbPolicy.Name) + assert.Equal(t, tc.requestBody.Enabled, dbPolicy.Enabled) + assert.Equal(t, len(tc.requestBody.Rules), len(dbPolicy.Rules)) + } + }) + } + } +} + +func Test_Policies_Update(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, 
false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + srcGroups := []string{testing_tools.TestGroupId} + dstGroups := []string{testing_tools.TestGroupId} + + tt := []struct { + name string + policyId string + requestBody *api.PolicyCreate + expectedStatus int + verifyResponse func(t *testing.T, policy *api.Policy) + }{ + { + name: "Update policy name", + policyId: "testPolicyId", + requestBody: &api.PolicyCreate{ + Name: "updatedPolicy", + Enabled: true, + Rules: []api.PolicyRuleUpdate{ + { + Name: "testRule", + Enabled: true, + Action: "accept", + Protocol: "all", + Bidirectional: true, + Sources: &srcGroups, + Destinations: &dstGroups, + }, + }, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, policy *api.Policy) { + t.Helper() + assert.Equal(t, "updatedPolicy", policy.Name) + }, + }, + { + name: "Update policy enabled state", + policyId: "testPolicyId", + requestBody: &api.PolicyCreate{ + Name: "testPolicy", + Enabled: false, + Rules: []api.PolicyRuleUpdate{ + { + Name: "testRule", + Enabled: true, + Action: "accept", + Protocol: "all", + Bidirectional: true, + Sources: &srcGroups, + Destinations: &dstGroups, + }, + }, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, policy *api.Policy) { + t.Helper() + assert.Equal(t, false, policy.Enabled) + }, + }, + { + name: "Update non-existing policy", + policyId: "nonExistingPolicyId", + requestBody: &api.PolicyCreate{ + Name: "whatever", + Enabled: true, + Rules: []api.PolicyRuleUpdate{ + { + Name: "rule", + Enabled: true, + Action: "accept", + Protocol: "all", + Sources: &srcGroups, + Destinations: &dstGroups, + }, + }, + }, + expectedStatus: http.StatusNotFound, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/policies.sql", nil, false) + + body, err := 
json.Marshal(tc.requestBody) + if err != nil { + t.Fatalf("Failed to marshal request body: %v", err) + } + + req := testing_tools.BuildRequest(t, body, http.MethodPut, strings.Replace("/api/policies/{policyId}", "{policyId}", tc.policyId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.Policy{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + + // Verify updated policy in DB + db := testing_tools.GetDB(t, am.GetStore()) + dbPolicy := testing_tools.VerifyPolicyInDB(t, db, tc.policyId) + assert.Equal(t, tc.requestBody.Name, dbPolicy.Name) + assert.Equal(t, tc.requestBody.Enabled, dbPolicy.Enabled) + } + }) + } + } +} + +func Test_Policies_Delete(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + policyId string + expectedStatus int + }{ + { + name: "Delete existing policy", + policyId: "testPolicyId", + expectedStatus: http.StatusOK, + }, + { + name: "Delete non-existing policy", + policyId: "nonExistingPolicyId", + expectedStatus: http.StatusNotFound, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, _ := 
channel.BuildApiBlackBoxWithDBState(t, "../testdata/policies.sql", nil, false) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodDelete, strings.Replace("/api/policies/{policyId}", "{policyId}", tc.policyId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + _, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if expectResponse && tc.expectedStatus == http.StatusOK { + db := testing_tools.GetDB(t, am.GetStore()) + testing_tools.VerifyPolicyNotInDB(t, db, tc.policyId) + } + }) + } + } +} diff --git a/management/server/http/testing/integration/routes_handler_integration_test.go b/management/server/http/testing/integration/routes_handler_integration_test.go new file mode 100644 index 000000000..eeb0c3025 --- /dev/null +++ b/management/server/http/testing/integration/routes_handler_integration_test.go @@ -0,0 +1,455 @@ +//go:build integration + +package integration + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/netbirdio/netbird/management/server/http/testing/testing_tools" + "github.com/netbirdio/netbird/management/server/http/testing/testing_tools/channel" + "github.com/netbirdio/netbird/route" + "github.com/netbirdio/netbird/shared/management/http/api" +) + +func Test_Routes_GetAll(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + for _, user := range users { 
+ t.Run(user.name+" - Get all routes", func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/routes.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/routes", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, http.StatusOK, user.expectResponse) + if !expectResponse { + return + } + + got := []api.Route{} + if err := json.Unmarshal(content, &got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + + assert.Equal(t, 2, len(got)) + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } +} + +func Test_Routes_GetById(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + routeId string + expectedStatus int + expectRoute bool + }{ + { + name: "Get existing route", + routeId: "testRouteId", + expectedStatus: http.StatusOK, + expectRoute: true, + }, + { + name: "Get non-existing route", + routeId: "nonExistingRouteId", + expectedStatus: http.StatusNotFound, + expectRoute: false, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/routes.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, 
http.MethodGet, strings.Replace("/api/routes/{routeId}", "{routeId}", tc.routeId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.expectRoute { + got := &api.Route{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + assert.Equal(t, tc.routeId, got.Id) + assert.Equal(t, "Test Network Route", got.Description) + } + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } + } +} + +func Test_Routes_Create(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + networkCIDR := "10.10.0.0/24" + peerID := testing_tools.TestPeerId + peerGroups := []string{"peerGroupId"} + + tt := []struct { + name string + requestBody *api.RouteRequest + expectedStatus int + verifyResponse func(t *testing.T, route *api.Route) + }{ + { + name: "Create network route with peer", + requestBody: &api.RouteRequest{ + Description: "New network route", + Network: &networkCIDR, + Peer: &peerID, + NetworkId: "newNet", + Metric: 100, + Masquerade: true, + Enabled: true, + Groups: []string{testing_tools.TestGroupId}, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, route *api.Route) { + t.Helper() + assert.NotEmpty(t, route.Id) + 
assert.Equal(t, "New network route", route.Description) + assert.Equal(t, 100, route.Metric) + assert.Equal(t, true, route.Masquerade) + assert.Equal(t, true, route.Enabled) + }, + }, + { + name: "Create network route with peer groups", + requestBody: &api.RouteRequest{ + Description: "Route with peer groups", + Network: &networkCIDR, + PeerGroups: &peerGroups, + NetworkId: "peerGroupNet", + Metric: 150, + Masquerade: false, + Enabled: true, + Groups: []string{testing_tools.TestGroupId}, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, route *api.Route) { + t.Helper() + assert.NotEmpty(t, route.Id) + assert.Equal(t, "Route with peer groups", route.Description) + }, + }, + { + name: "Create route with empty network_id", + requestBody: &api.RouteRequest{ + Description: "Empty net id", + Network: &networkCIDR, + Peer: &peerID, + NetworkId: "", + Metric: 100, + Enabled: true, + Groups: []string{testing_tools.TestGroupId}, + }, + expectedStatus: http.StatusUnprocessableEntity, + }, + { + name: "Create route with metric 0", + requestBody: &api.RouteRequest{ + Description: "Zero metric", + Network: &networkCIDR, + Peer: &peerID, + NetworkId: "zeroMetric", + Metric: 0, + Enabled: true, + Groups: []string{testing_tools.TestGroupId}, + }, + expectedStatus: http.StatusUnprocessableEntity, + }, + { + name: "Create route with metric 10000", + requestBody: &api.RouteRequest{ + Description: "High metric", + Network: &networkCIDR, + Peer: &peerID, + NetworkId: "highMetric", + Metric: 10000, + Enabled: true, + Groups: []string{testing_tools.TestGroupId}, + }, + expectedStatus: http.StatusUnprocessableEntity, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/routes.sql", nil, false) + + body, err := json.Marshal(tc.requestBody) + if err != nil { + t.Fatalf("Failed to marshal request body: %v", err) + } + + req := 
testing_tools.BuildRequest(t, body, http.MethodPost, "/api/routes", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.Route{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + + // Verify route exists in DB with correct fields + db := testing_tools.GetDB(t, am.GetStore()) + dbRoute := testing_tools.VerifyRouteInDB(t, db, route.ID(got.Id)) + assert.Equal(t, tc.requestBody.Description, dbRoute.Description) + assert.Equal(t, tc.requestBody.Metric, dbRoute.Metric) + assert.Equal(t, tc.requestBody.Masquerade, dbRoute.Masquerade) + assert.Equal(t, tc.requestBody.Enabled, dbRoute.Enabled) + assert.Equal(t, route.NetID(tc.requestBody.NetworkId), dbRoute.NetID) + } + }) + } + } +} + +func Test_Routes_Update(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + networkCIDR := "10.0.0.0/24" + peerID := testing_tools.TestPeerId + + tt := []struct { + name string + routeId string + requestBody *api.RouteRequest + expectedStatus int + verifyResponse func(t *testing.T, route *api.Route) + }{ + { + name: "Update route description", + routeId: "testRouteId", + requestBody: &api.RouteRequest{ + Description: "Updated description", + Network: &networkCIDR, + 
Peer: &peerID, + NetworkId: "testNet", + Metric: 100, + Masquerade: true, + Enabled: true, + Groups: []string{testing_tools.TestGroupId}, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, route *api.Route) { + t.Helper() + assert.Equal(t, "testRouteId", route.Id) + assert.Equal(t, "Updated description", route.Description) + }, + }, + { + name: "Update route metric", + routeId: "testRouteId", + requestBody: &api.RouteRequest{ + Description: "Test Network Route", + Network: &networkCIDR, + Peer: &peerID, + NetworkId: "testNet", + Metric: 500, + Masquerade: true, + Enabled: true, + Groups: []string{testing_tools.TestGroupId}, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, route *api.Route) { + t.Helper() + assert.Equal(t, 500, route.Metric) + }, + }, + { + name: "Update non-existing route", + routeId: "nonExistingRouteId", + requestBody: &api.RouteRequest{ + Description: "whatever", + Network: &networkCIDR, + Peer: &peerID, + NetworkId: "testNet", + Metric: 100, + Enabled: true, + Groups: []string{testing_tools.TestGroupId}, + }, + expectedStatus: http.StatusNotFound, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/routes.sql", nil, false) + + body, err := json.Marshal(tc.requestBody) + if err != nil { + t.Fatalf("Failed to marshal request body: %v", err) + } + + req := testing_tools.BuildRequest(t, body, http.MethodPut, strings.Replace("/api/routes/{routeId}", "{routeId}", tc.routeId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.Route{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in 
correct json format; %v", err) + } + tc.verifyResponse(t, got) + + // Verify updated route in DB + db := testing_tools.GetDB(t, am.GetStore()) + dbRoute := testing_tools.VerifyRouteInDB(t, db, route.ID(got.Id)) + assert.Equal(t, tc.requestBody.Description, dbRoute.Description) + assert.Equal(t, tc.requestBody.Metric, dbRoute.Metric) + assert.Equal(t, tc.requestBody.Masquerade, dbRoute.Masquerade) + assert.Equal(t, tc.requestBody.Enabled, dbRoute.Enabled) + } + }) + } + } +} + +func Test_Routes_Delete(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + routeId string + expectedStatus int + }{ + { + name: "Delete existing route", + routeId: "testRouteId", + expectedStatus: http.StatusOK, + }, + { + name: "Delete non-existing route", + routeId: "nonExistingRouteId", + expectedStatus: http.StatusNotFound, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/routes.sql", nil, false) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodDelete, strings.Replace("/api/routes/{routeId}", "{routeId}", tc.routeId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + + // Verify route was deleted from DB for successful deletes + if tc.expectedStatus == http.StatusOK && 
user.expectResponse { + db := testing_tools.GetDB(t, am.GetStore()) + testing_tools.VerifyRouteNotInDB(t, db, route.ID(tc.routeId)) + } + }) + } + } +} diff --git a/management/server/http/testing/integration/setupkeys_handler_integration_test.go b/management/server/http/testing/integration/setupkeys_handler_integration_test.go index c1a9829da..0d3aaac82 100644 --- a/management/server/http/testing/integration/setupkeys_handler_integration_test.go +++ b/management/server/http/testing/integration/setupkeys_handler_integration_test.go @@ -3,7 +3,6 @@ package integration import ( - "context" "encoding/json" "net/http" "net/http/httptest" @@ -14,7 +13,6 @@ import ( "github.com/stretchr/testify/assert" - "github.com/netbirdio/netbird/management/server/http/handlers/setup_keys" "github.com/netbirdio/netbird/management/server/http/testing/testing_tools" "github.com/netbirdio/netbird/management/server/http/testing/testing_tools/channel" "github.com/netbirdio/netbird/shared/management/http/api" @@ -254,7 +252,7 @@ func Test_SetupKeys_Create(t *testing.T) { expectedResponse: nil, }, { - name: "Create Setup Key", + name: "Create Setup Key with nil AutoGroups", requestType: http.MethodPost, requestPath: "/api/setup-keys", requestBody: &api.CreateSetupKeyRequest{ @@ -308,14 +306,15 @@ func Test_SetupKeys_Create(t *testing.T) { t.Fatalf("Sent content is not in correct json format; %v", err) } + gotID := got.Id validateCreatedKey(t, tc.expectedResponse, got) - key, err := am.GetSetupKey(context.Background(), testing_tools.TestAccountId, testing_tools.TestUserId, got.Id) - if err != nil { - return - } - - validateCreatedKey(t, tc.expectedResponse, setup_keys.ToResponseBody(key)) + // Verify setup key exists in DB via gorm + db := testing_tools.GetDB(t, am.GetStore()) + dbKey := testing_tools.VerifySetupKeyInDB(t, db, gotID) + assert.Equal(t, tc.expectedResponse.Name, dbKey.Name) + assert.Equal(t, tc.expectedResponse.Revoked, dbKey.Revoked) + assert.Equal(t, 
tc.expectedResponse.UsageLimit, dbKey.UsageLimit) select { case <-done: @@ -571,7 +570,7 @@ func Test_SetupKeys_Update(t *testing.T) { for _, tc := range tt { for _, user := range users { - t.Run(tc.name, func(t *testing.T) { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { apiHandler, am, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/setup_keys.sql", nil, true) body, err := json.Marshal(tc.requestBody) @@ -594,14 +593,16 @@ func Test_SetupKeys_Update(t *testing.T) { t.Fatalf("Sent content is not in correct json format; %v", err) } + gotID := got.Id + gotRevoked := got.Revoked + gotUsageLimit := got.UsageLimit validateCreatedKey(t, tc.expectedResponse, got) - key, err := am.GetSetupKey(context.Background(), testing_tools.TestAccountId, testing_tools.TestUserId, got.Id) - if err != nil { - return - } - - validateCreatedKey(t, tc.expectedResponse, setup_keys.ToResponseBody(key)) + // Verify updated setup key in DB via gorm + db := testing_tools.GetDB(t, am.GetStore()) + dbKey := testing_tools.VerifySetupKeyInDB(t, db, gotID) + assert.Equal(t, gotRevoked, dbKey.Revoked) + assert.Equal(t, gotUsageLimit, dbKey.UsageLimit) select { case <-done: @@ -759,8 +760,8 @@ func Test_SetupKeys_Get(t *testing.T) { apiHandler.ServeHTTP(recorder, req) - content, expectRespnose := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) - if !expectRespnose { + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { return } got := &api.SetupKey{} @@ -768,14 +769,16 @@ func Test_SetupKeys_Get(t *testing.T) { t.Fatalf("Sent content is not in correct json format; %v", err) } + gotID := got.Id + gotName := got.Name + gotRevoked := got.Revoked validateCreatedKey(t, tc.expectedResponse, got) - key, err := am.GetSetupKey(context.Background(), testing_tools.TestAccountId, testing_tools.TestUserId, got.Id) - if err != nil { - return - } - - validateCreatedKey(t, 
tc.expectedResponse, setup_keys.ToResponseBody(key)) + // Verify setup key in DB via gorm + db := testing_tools.GetDB(t, am.GetStore()) + dbKey := testing_tools.VerifySetupKeyInDB(t, db, gotID) + assert.Equal(t, gotName, dbKey.Name) + assert.Equal(t, gotRevoked, dbKey.Revoked) select { case <-done: @@ -928,15 +931,17 @@ func Test_SetupKeys_GetAll(t *testing.T) { return tc.expectedResponse[i].UsageLimit < tc.expectedResponse[j].UsageLimit }) + db := testing_tools.GetDB(t, am.GetStore()) for i := range tc.expectedResponse { + gotID := got[i].Id + gotName := got[i].Name + gotRevoked := got[i].Revoked validateCreatedKey(t, tc.expectedResponse[i], &got[i]) - key, err := am.GetSetupKey(context.Background(), testing_tools.TestAccountId, testing_tools.TestUserId, got[i].Id) - if err != nil { - return - } - - validateCreatedKey(t, tc.expectedResponse[i], setup_keys.ToResponseBody(key)) + // Verify each setup key in DB via gorm + dbKey := testing_tools.VerifySetupKeyInDB(t, db, gotID) + assert.Equal(t, gotName, dbKey.Name) + assert.Equal(t, gotRevoked, dbKey.Revoked) } select { @@ -1104,8 +1109,9 @@ func Test_SetupKeys_Delete(t *testing.T) { t.Fatalf("Sent content is not in correct json format; %v", err) } - _, err := am.GetSetupKey(context.Background(), testing_tools.TestAccountId, testing_tools.TestUserId, got.Id) - assert.Errorf(t, err, "Expected error when trying to get deleted key") + // Verify setup key deleted from DB via gorm + db := testing_tools.GetDB(t, am.GetStore()) + testing_tools.VerifySetupKeyNotInDB(t, db, got.Id) select { case <-done: @@ -1120,7 +1126,7 @@ func Test_SetupKeys_Delete(t *testing.T) { func validateCreatedKey(t *testing.T, expectedKey *api.SetupKey, got *api.SetupKey) { t.Helper() - if got.Expires.After(time.Now().Add(-1*time.Minute)) && got.Expires.Before(time.Now().Add(testing_tools.ExpiresIn*time.Second)) || + if (got.Expires.After(time.Now().Add(-1*time.Minute)) && got.Expires.Before(time.Now().Add(testing_tools.ExpiresIn*time.Second))) || 
got.Expires.After(time.Date(2300, 01, 01, 0, 0, 0, 0, time.Local)) || got.Expires.Before(time.Date(1950, 01, 01, 0, 0, 0, 0, time.Local)) { got.Expires = time.Time{} diff --git a/management/server/http/testing/integration/users_handler_integration_test.go b/management/server/http/testing/integration/users_handler_integration_test.go new file mode 100644 index 000000000..eae3b4ad5 --- /dev/null +++ b/management/server/http/testing/integration/users_handler_integration_test.go @@ -0,0 +1,701 @@ +//go:build integration + +package integration + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/netbirdio/netbird/management/server/http/testing/testing_tools" + "github.com/netbirdio/netbird/management/server/http/testing/testing_tools/channel" + "github.com/netbirdio/netbird/shared/management/http/api" +) + +func Test_Users_GetAll(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, true}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, true}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + for _, user := range users { + t.Run(user.name+" - Get all users", func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/users_integration.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/users", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, http.StatusOK, user.expectResponse) + if !expectResponse { + 
return + } + + got := []api.User{} + if err := json.Unmarshal(content, &got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + + assert.GreaterOrEqual(t, len(got), 1) + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } +} + +func Test_Users_GetAll_ServiceUsers(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + for _, user := range users { + t.Run(user.name+" - Get all service users", func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/users_integration.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, "/api/users?service_user=true", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, http.StatusOK, user.expectResponse) + if !expectResponse { + return + } + + got := []api.User{} + if err := json.Unmarshal(content, &got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + + for _, u := range got { + assert.NotNil(t, u.IsServiceUser) + assert.Equal(t, true, *u.IsServiceUser) + } + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } +} + +func Test_Users_Create_ServiceUser(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool 
+ }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + requestBody *api.UserCreateRequest + expectedStatus int + verifyResponse func(t *testing.T, user *api.User) + }{ + { + name: "Create service user with admin role", + requestBody: &api.UserCreateRequest{ + Role: "admin", + IsServiceUser: true, + AutoGroups: []string{testing_tools.TestGroupId}, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, user *api.User) { + t.Helper() + assert.NotEmpty(t, user.Id) + assert.Equal(t, "admin", user.Role) + assert.NotNil(t, user.IsServiceUser) + assert.Equal(t, true, *user.IsServiceUser) + }, + }, + { + name: "Create service user with user role", + requestBody: &api.UserCreateRequest{ + Role: "user", + IsServiceUser: true, + AutoGroups: []string{testing_tools.TestGroupId}, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, user *api.User) { + t.Helper() + assert.NotEmpty(t, user.Id) + assert.Equal(t, "user", user.Role) + }, + }, + { + name: "Create service user with empty auto_groups", + requestBody: &api.UserCreateRequest{ + Role: "admin", + IsServiceUser: true, + AutoGroups: []string{}, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, user *api.User) { + t.Helper() + assert.NotEmpty(t, user.Id) + }, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/users_integration.sql", nil, true) + + body, err := 
json.Marshal(tc.requestBody) + if err != nil { + t.Fatalf("Failed to marshal request body: %v", err) + } + + req := testing_tools.BuildRequest(t, body, http.MethodPost, "/api/users", user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.User{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + + // Verify user in DB + db := testing_tools.GetDB(t, am.GetStore()) + dbUser := testing_tools.VerifyUserInDB(t, db, got.Id) + assert.True(t, dbUser.IsServiceUser) + assert.Equal(t, string(dbUser.Role), string(tc.requestBody.Role)) + } + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } + } +} + +func Test_Users_Update(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + targetUserId string + requestBody *api.UserRequest + expectedStatus int + verifyResponse func(t *testing.T, user *api.User) + }{ + { + name: "Update user role to admin", + targetUserId: testing_tools.TestUserId, + requestBody: &api.UserRequest{ + Role: "admin", + AutoGroups: []string{}, + IsBlocked: false, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t 
*testing.T, user *api.User) { + t.Helper() + assert.Equal(t, "admin", user.Role) + }, + }, + { + name: "Update user auto_groups", + targetUserId: testing_tools.TestUserId, + requestBody: &api.UserRequest{ + Role: "user", + AutoGroups: []string{testing_tools.TestGroupId}, + IsBlocked: false, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, user *api.User) { + t.Helper() + assert.Equal(t, 1, len(user.AutoGroups)) + }, + }, + { + name: "Block user", + targetUserId: testing_tools.TestUserId, + requestBody: &api.UserRequest{ + Role: "user", + AutoGroups: []string{}, + IsBlocked: true, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, user *api.User) { + t.Helper() + assert.Equal(t, true, user.IsBlocked) + }, + }, + { + name: "Update non-existing user", + targetUserId: "nonExistingUserId", + requestBody: &api.UserRequest{ + Role: "user", + AutoGroups: []string{}, + IsBlocked: false, + }, + expectedStatus: http.StatusNotFound, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, _ := channel.BuildApiBlackBoxWithDBState(t, "../testdata/users_integration.sql", nil, false) + + body, err := json.Marshal(tc.requestBody) + if err != nil { + t.Fatalf("Failed to marshal request body: %v", err) + } + + req := testing_tools.BuildRequest(t, body, http.MethodPut, strings.Replace("/api/users/{userId}", "{userId}", tc.targetUserId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.User{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + + // Verify updated fields in DB + if tc.expectedStatus == http.StatusOK { + db 
:= testing_tools.GetDB(t, am.GetStore()) + dbUser := testing_tools.VerifyUserInDB(t, db, tc.targetUserId) + assert.Equal(t, string(dbUser.Role), string(tc.requestBody.Role)) + assert.Equal(t, dbUser.Blocked, tc.requestBody.IsBlocked) + assert.ElementsMatch(t, dbUser.AutoGroups, tc.requestBody.AutoGroups) + } + } + }) + } + } +} + +func Test_Users_Delete(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + targetUserId string + expectedStatus int + }{ + { + name: "Delete existing service user", + targetUserId: "deletableServiceUserId", + expectedStatus: http.StatusOK, + }, + { + name: "Delete non-existing user", + targetUserId: "nonExistingUserId", + expectedStatus: http.StatusNotFound, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/users_integration.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodDelete, strings.Replace("/api/users/{userId}", "{userId}", tc.targetUserId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + _, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + + // Verify user deleted from DB for successful deletes + if expectResponse && tc.expectedStatus == http.StatusOK { + db := testing_tools.GetDB(t, am.GetStore()) + 
testing_tools.VerifyUserNotInDB(t, db, tc.targetUserId) + } + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } + } +} + +func Test_PATs_GetAll(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + for _, user := range users { + t.Run(user.name+" - Get all PATs for service user", func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/users_integration.sql", nil, true) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, strings.Replace("/api/users/{userId}/tokens", "{userId}", testing_tools.TestServiceUserId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, http.StatusOK, user.expectResponse) + if !expectResponse { + return + } + + got := []api.PersonalAccessToken{} + if err := json.Unmarshal(content, &got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + + assert.Equal(t, 1, len(got)) + assert.Equal(t, "serviceToken", got[0].Name) + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } +} + +func Test_PATs_GetById(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, 
+ {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + tokenId string + expectedStatus int + expectToken bool + }{ + { + name: "Get existing PAT", + tokenId: "serviceTokenId", + expectedStatus: http.StatusOK, + expectToken: true, + }, + { + name: "Get non-existing PAT", + tokenId: "nonExistingTokenId", + expectedStatus: http.StatusNotFound, + expectToken: false, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, _, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/users_integration.sql", nil, true) + + path := strings.Replace("/api/users/{userId}/tokens/{tokenId}", "{userId}", testing_tools.TestServiceUserId, 1) + path = strings.Replace(path, "{tokenId}", tc.tokenId, 1) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodGet, path, user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.expectToken { + got := &api.PersonalAccessToken{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + assert.Equal(t, "serviceTokenId", got.Id) + assert.Equal(t, "serviceToken", got.Name) + } + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } + } +} + +func Test_PATs_Create(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", 
testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + targetUserId string + requestBody *api.PersonalAccessTokenRequest + expectedStatus int + verifyResponse func(t *testing.T, pat *api.PersonalAccessTokenGenerated) + }{ + { + name: "Create PAT with 30 day expiry", + targetUserId: testing_tools.TestServiceUserId, + requestBody: &api.PersonalAccessTokenRequest{ + Name: "newPAT", + ExpiresIn: 30, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, pat *api.PersonalAccessTokenGenerated) { + t.Helper() + assert.NotEmpty(t, pat.PlainToken) + assert.Equal(t, "newPAT", pat.PersonalAccessToken.Name) + }, + }, + { + name: "Create PAT with 365 day expiry", + targetUserId: testing_tools.TestServiceUserId, + requestBody: &api.PersonalAccessTokenRequest{ + Name: "longPAT", + ExpiresIn: 365, + }, + expectedStatus: http.StatusOK, + verifyResponse: func(t *testing.T, pat *api.PersonalAccessTokenGenerated) { + t.Helper() + assert.NotEmpty(t, pat.PlainToken) + assert.Equal(t, "longPAT", pat.PersonalAccessToken.Name) + }, + }, + { + name: "Create PAT with empty name", + targetUserId: testing_tools.TestServiceUserId, + requestBody: &api.PersonalAccessTokenRequest{ + Name: "", + ExpiresIn: 30, + }, + expectedStatus: http.StatusUnprocessableEntity, + }, + { + name: "Create PAT with 0 day expiry", + targetUserId: testing_tools.TestServiceUserId, + requestBody: &api.PersonalAccessTokenRequest{ + Name: "zeroPAT", + ExpiresIn: 0, + }, + expectedStatus: http.StatusUnprocessableEntity, + }, + { + name: "Create PAT with expiry over 365 days", + targetUserId: 
testing_tools.TestServiceUserId, + requestBody: &api.PersonalAccessTokenRequest{ + Name: "tooLongPAT", + ExpiresIn: 400, + }, + expectedStatus: http.StatusUnprocessableEntity, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/users_integration.sql", nil, true) + + body, err := json.Marshal(tc.requestBody) + if err != nil { + t.Fatalf("Failed to marshal request body: %v", err) + } + + req := testing_tools.BuildRequest(t, body, http.MethodPost, strings.Replace("/api/users/{userId}/tokens", "{userId}", tc.targetUserId, 1), user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + content, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + if !expectResponse { + return + } + + if tc.verifyResponse != nil { + got := &api.PersonalAccessTokenGenerated{} + if err := json.Unmarshal(content, got); err != nil { + t.Fatalf("Sent content is not in correct json format; %v", err) + } + tc.verifyResponse(t, got) + + // Verify PAT in DB + db := testing_tools.GetDB(t, am.GetStore()) + dbPAT := testing_tools.VerifyPATInDB(t, db, got.PersonalAccessToken.Id) + assert.Equal(t, tc.requestBody.Name, dbPAT.Name) + } + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } + } +} + +func Test_PATs_Delete(t *testing.T) { + users := []struct { + name string + userId string + expectResponse bool + }{ + {"Regular user", testing_tools.TestUserId, false}, + {"Admin user", testing_tools.TestAdminId, true}, + {"Owner user", testing_tools.TestOwnerId, true}, + {"Regular service user", testing_tools.TestServiceUserId, false}, + {"Admin service user", testing_tools.TestServiceAdminId, true}, + {"Blocked user", testing_tools.BlockedUserId, false}, + {"Other user", testing_tools.OtherUserId, 
false}, + {"Invalid token", testing_tools.InvalidToken, false}, + } + + tt := []struct { + name string + tokenId string + expectedStatus int + }{ + { + name: "Delete existing PAT", + tokenId: "serviceTokenId", + expectedStatus: http.StatusOK, + }, + { + name: "Delete non-existing PAT", + tokenId: "nonExistingTokenId", + expectedStatus: http.StatusNotFound, + }, + } + + for _, tc := range tt { + for _, user := range users { + t.Run(user.name+" - "+tc.name, func(t *testing.T) { + apiHandler, am, done := channel.BuildApiBlackBoxWithDBState(t, "../testdata/users_integration.sql", nil, true) + + path := strings.Replace("/api/users/{userId}/tokens/{tokenId}", "{userId}", testing_tools.TestServiceUserId, 1) + path = strings.Replace(path, "{tokenId}", tc.tokenId, 1) + + req := testing_tools.BuildRequest(t, []byte{}, http.MethodDelete, path, user.userId) + recorder := httptest.NewRecorder() + apiHandler.ServeHTTP(recorder, req) + + _, expectResponse := testing_tools.ReadResponse(t, recorder, tc.expectedStatus, user.expectResponse) + + // Verify PAT deleted from DB for successful deletes + if expectResponse && tc.expectedStatus == http.StatusOK { + db := testing_tools.GetDB(t, am.GetStore()) + testing_tools.VerifyPATNotInDB(t, db, tc.tokenId) + } + + select { + case <-done: + case <-time.After(time.Second): + t.Error("timeout waiting for peerShouldNotReceiveUpdate") + } + }) + } + } +} diff --git a/management/server/http/testing/testdata/accounts.sql b/management/server/http/testing/testdata/accounts.sql new file mode 100644 index 000000000..35f00d419 --- /dev/null +++ b/management/server/http/testing/testdata/accounts.sql @@ -0,0 +1,18 @@ +CREATE TABLE `accounts` (`id` text,`created_by` text,`created_at` datetime,`domain` text,`domain_category` text,`is_domain_primary_account` numeric,`network_identifier` text,`network_net` text,`network_dns` text,`network_serial` integer,`dns_settings_disabled_management_groups` text,`settings_peer_login_expiration_enabled` 
numeric,`settings_peer_login_expiration` integer,`settings_regular_users_view_blocked` numeric,`settings_groups_propagation_enabled` numeric,`settings_jwt_groups_enabled` numeric,`settings_jwt_groups_claim_name` text,`settings_jwt_allow_groups` text,`settings_extra_peer_approval_enabled` numeric,`settings_extra_integrated_validator_groups` text,PRIMARY KEY (`id`)); +CREATE TABLE `users` (`id` text,`account_id` text,`role` text,`is_service_user` numeric,`non_deletable` numeric,`service_user_name` text,`auto_groups` text,`blocked` numeric,`last_login` datetime DEFAULT NULL,`created_at` datetime,`issued` text DEFAULT "api",`integration_ref_id` integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_users_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `groups` (`id` text,`account_id` text,`name` text,`issued` text,`peers` text,`integration_ref_id` integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_groups_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `peers` (`id` text,`account_id` text,`key` text,`setup_key` text,`ip` text,`meta_hostname` text,`meta_go_os` text,`meta_kernel` text,`meta_core` text,`meta_platform` text,`meta_os` text,`meta_os_version` text,`meta_wt_version` text,`meta_ui_version` text,`meta_kernel_version` text,`meta_network_addresses` text,`meta_system_serial_number` text,`meta_system_product_name` text,`meta_system_manufacturer` text,`meta_environment` text,`meta_files` text,`name` text,`dns_label` text,`peer_status_last_seen` datetime,`peer_status_connected` numeric,`peer_status_login_expired` numeric,`peer_status_requires_approval` numeric,`user_id` text,`ssh_key` text,`ssh_enabled` numeric,`login_expiration_enabled` numeric,`last_login` datetime,`created_at` datetime,`ephemeral` numeric,`location_connection_ip` text,`location_country_code` text,`location_city_name` text,`location_geo_name_id` integer,PRIMARY KEY 
(`id`),CONSTRAINT `fk_accounts_peers_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `setup_keys` (`id` text,`account_id` text,`key` text,`key_secret` text,`name` text,`type` text,`created_at` datetime,`expires_at` datetime,`updated_at` datetime,`revoked` numeric,`used_times` integer,`last_used` datetime DEFAULT NULL,`auto_groups` text,`usage_limit` integer,`ephemeral` numeric,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_setup_keys_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); + +INSERT INTO accounts VALUES('testAccountId','','2024-10-02 16:01:38.000000000+00:00','test.com','private',1,'testNetworkIdentifier','{"IP":"100.64.0.0","Mask":"//8AAA=="}','',0,'[]',1,86400000000000,0,0,0,'',NULL,NULL,NULL); +INSERT INTO users VALUES('testUserId','testAccountId','user',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testAdminId','testAccountId','admin',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testOwnerId','testAccountId','owner',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testServiceUserId','testAccountId','user',1,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testServiceAdminId','testAccountId','admin',1,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('blockedUserId','testAccountId','admin',0,0,'','[]',1,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('otherUserId','otherAccountId','admin',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO "groups" VALUES('testGroupId','testAccountId','testGroupName','api','[]',0,''); +INSERT INTO "groups" VALUES('newGroupId','testAccountId','newGroupName','api','[]',0,''); +INSERT INTO peers 
VALUES('testPeerId','testAccountId','5rvhvriKJZ3S9oxYToVj5TzDM9u9y8cxg7htIMWlYAg=','72546A29-6BC8-4311-BCFC-9CDBF33F1A48','"100.64.114.31"','f2a34f6a4731','linux','Linux','11','unknown','Debian GNU/Linux','','0.12.0','','',NULL,'','','','{"Cloud":"","Platform":""}',NULL,'f2a34f6a4731','f2a34f6a4731','2023-03-02 09:21:02.189035775+01:00',0,0,0,'','ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILzUUSYG/LGnV8zarb2SGN+tib/PZ+M7cL4WtTzUrTpk',0,1,'2023-03-01 19:48:19.817799698+01:00','2024-10-02 17:00:32.527947+02:00',0,'""','','',0); +INSERT INTO setup_keys VALUES('testKeyId','testAccountId','testKey','testK****','existingKey','one-off','2021-08-19 20:46:20.000000000+00:00','2321-09-18 20:46:20.000000000+00:00','2021-08-19 20:46:20.000000000+00:000',0,0,NULL,'["testGroupId"]',1,0); diff --git a/management/server/http/testing/testdata/dns.sql b/management/server/http/testing/testdata/dns.sql new file mode 100644 index 000000000..9ed4daf7e --- /dev/null +++ b/management/server/http/testing/testdata/dns.sql @@ -0,0 +1,21 @@ +CREATE TABLE `accounts` (`id` text,`created_by` text,`created_at` datetime,`domain` text,`domain_category` text,`is_domain_primary_account` numeric,`network_identifier` text,`network_net` text,`network_dns` text,`network_serial` integer,`dns_settings_disabled_management_groups` text,`settings_peer_login_expiration_enabled` numeric,`settings_peer_login_expiration` integer,`settings_regular_users_view_blocked` numeric,`settings_groups_propagation_enabled` numeric,`settings_jwt_groups_enabled` numeric,`settings_jwt_groups_claim_name` text,`settings_jwt_allow_groups` text,`settings_extra_peer_approval_enabled` numeric,`settings_extra_integrated_validator_groups` text,PRIMARY KEY (`id`)); +CREATE TABLE `users` (`id` text,`account_id` text,`role` text,`is_service_user` numeric,`non_deletable` numeric,`service_user_name` text,`auto_groups` text,`blocked` numeric,`last_login` datetime DEFAULT NULL,`created_at` datetime,`issued` text DEFAULT "api",`integration_ref_id` 
integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_users_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `groups` (`id` text,`account_id` text,`name` text,`issued` text,`peers` text,`integration_ref_id` integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_groups_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `setup_keys` (`id` text,`account_id` text,`key` text,`key_secret` text,`name` text,`type` text,`created_at` datetime,`expires_at` datetime,`updated_at` datetime,`revoked` numeric,`used_times` integer,`last_used` datetime DEFAULT NULL,`auto_groups` text,`usage_limit` integer,`ephemeral` numeric,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_setup_keys_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `peers` (`id` text,`account_id` text,`key` text,`setup_key` text,`ip` text,`meta_hostname` text,`meta_go_os` text,`meta_kernel` text,`meta_core` text,`meta_platform` text,`meta_os` text,`meta_os_version` text,`meta_wt_version` text,`meta_ui_version` text,`meta_kernel_version` text,`meta_network_addresses` text,`meta_system_serial_number` text,`meta_system_product_name` text,`meta_system_manufacturer` text,`meta_environment` text,`meta_files` text,`name` text,`dns_label` text,`peer_status_last_seen` datetime,`peer_status_connected` numeric,`peer_status_login_expired` numeric,`peer_status_requires_approval` numeric,`user_id` text,`ssh_key` text,`ssh_enabled` numeric,`login_expiration_enabled` numeric,`last_login` datetime,`created_at` datetime,`ephemeral` numeric,`location_connection_ip` text,`location_country_code` text,`location_city_name` text,`location_geo_name_id` integer,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_peers_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `name_server_groups` (`id` text,`account_id` text,`name` text,`description` text,`name_servers` text,`groups` text,`primary` 
numeric,`domains` text,`enabled` numeric,`search_domains_enabled` numeric,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_name_server_groups_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); + +INSERT INTO accounts VALUES('testAccountId','','2024-10-02 16:01:38.000000000+00:00','test.com','private',1,'testNetworkIdentifier','{"IP":"100.64.0.0","Mask":"//8AAA=="}','',0,'[]',1,86400000000000,0,0,0,'',NULL,NULL,NULL); +INSERT INTO users VALUES('testUserId','testAccountId','user',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testAdminId','testAccountId','admin',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testOwnerId','testAccountId','owner',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testServiceUserId','testAccountId','user',1,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testServiceAdminId','testAccountId','admin',1,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('blockedUserId','testAccountId','admin',0,0,'','[]',1,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('otherUserId','otherAccountId','admin',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO "groups" VALUES('testGroupId','testAccountId','testGroupName','api','["testPeerId"]',0,''); +INSERT INTO "groups" VALUES('newGroupId','testAccountId','newGroupName','api','[]',0,''); +INSERT INTO setup_keys VALUES('testKeyId','testAccountId','testKey','testK****','existingKey','one-off','2021-08-19 20:46:20.000000000+00:00','2321-09-18 20:46:20.000000000+00:00','2021-08-19 20:46:20.000000000+00:000',0,0,NULL,'["testGroupId"]',1,0); +INSERT INTO peers 
VALUES('testPeerId','testAccountId','5rvhvriKJZ3S9oxYToVj5TzDM9u9y8cxg7htIMWlYAg=','72546A29-6BC8-4311-BCFC-9CDBF33F1A48','"100.64.114.31"','f2a34f6a4731','linux','Linux','11','unknown','Debian GNU/Linux','','0.12.0','','',NULL,'','','','{"Cloud":"","Platform":""}',NULL,'f2a34f6a4731','f2a34f6a4731','2023-03-02 09:21:02.189035775+01:00',0,0,0,'','ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILzUUSYG/LGnV8zarb2SGN+tib/PZ+M7cL4WtTzUrTpk',0,1,'2023-03-01 19:48:19.817799698+01:00','2024-10-02 17:00:32.527947+02:00',0,'""','','',0); + +INSERT INTO name_server_groups VALUES('testNSGroupId','testAccountId','testNSGroup','test nameserver group','[{"IP":"1.1.1.1","NSType":1,"Port":53}]','["testGroupId"]',0,'["example.com"]',1,0); \ No newline at end of file diff --git a/management/server/http/testing/testdata/events.sql b/management/server/http/testing/testdata/events.sql new file mode 100644 index 000000000..27fd01aea --- /dev/null +++ b/management/server/http/testing/testdata/events.sql @@ -0,0 +1,18 @@ +CREATE TABLE `accounts` (`id` text,`created_by` text,`created_at` datetime,`domain` text,`domain_category` text,`is_domain_primary_account` numeric,`network_identifier` text,`network_net` text,`network_dns` text,`network_serial` integer,`dns_settings_disabled_management_groups` text,`settings_peer_login_expiration_enabled` numeric,`settings_peer_login_expiration` integer,`settings_regular_users_view_blocked` numeric,`settings_groups_propagation_enabled` numeric,`settings_jwt_groups_enabled` numeric,`settings_jwt_groups_claim_name` text,`settings_jwt_allow_groups` text,`settings_extra_peer_approval_enabled` numeric,`settings_extra_integrated_validator_groups` text,PRIMARY KEY (`id`)); +CREATE TABLE `users` (`id` text,`account_id` text,`role` text,`is_service_user` numeric,`non_deletable` numeric,`service_user_name` text,`auto_groups` text,`blocked` numeric,`last_login` datetime DEFAULT NULL,`created_at` datetime,`issued` text DEFAULT "api",`integration_ref_id` 
integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_users_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `groups` (`id` text,`account_id` text,`name` text,`issued` text,`peers` text,`integration_ref_id` integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_groups_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `setup_keys` (`id` text,`account_id` text,`key` text,`key_secret` text,`name` text,`type` text,`created_at` datetime,`expires_at` datetime,`updated_at` datetime,`revoked` numeric,`used_times` integer,`last_used` datetime DEFAULT NULL,`auto_groups` text,`usage_limit` integer,`ephemeral` numeric,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_setup_keys_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `peers` (`id` text,`account_id` text,`key` text,`setup_key` text,`ip` text,`meta_hostname` text,`meta_go_os` text,`meta_kernel` text,`meta_core` text,`meta_platform` text,`meta_os` text,`meta_os_version` text,`meta_wt_version` text,`meta_ui_version` text,`meta_kernel_version` text,`meta_network_addresses` text,`meta_system_serial_number` text,`meta_system_product_name` text,`meta_system_manufacturer` text,`meta_environment` text,`meta_files` text,`name` text,`dns_label` text,`peer_status_last_seen` datetime,`peer_status_connected` numeric,`peer_status_login_expired` numeric,`peer_status_requires_approval` numeric,`user_id` text,`ssh_key` text,`ssh_enabled` numeric,`login_expiration_enabled` numeric,`last_login` datetime,`created_at` datetime,`ephemeral` numeric,`location_connection_ip` text,`location_country_code` text,`location_city_name` text,`location_geo_name_id` integer,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_peers_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); + +INSERT INTO accounts VALUES('testAccountId','','2024-10-02 
16:01:38.000000000+00:00','test.com','private',1,'testNetworkIdentifier','{"IP":"100.64.0.0","Mask":"//8AAA=="}','',0,'[]',1,86400000000000,0,0,0,'',NULL,NULL,NULL); +INSERT INTO users VALUES('testUserId','testAccountId','user',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testAdminId','testAccountId','admin',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testOwnerId','testAccountId','owner',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testServiceUserId','testAccountId','user',1,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testServiceAdminId','testAccountId','admin',1,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('blockedUserId','testAccountId','admin',0,0,'','[]',1,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('otherUserId','otherAccountId','admin',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO "groups" VALUES('testGroupId','testAccountId','testGroupName','api','[]',0,''); +INSERT INTO "groups" VALUES('newGroupId','testAccountId','newGroupName','api','[]',0,''); +INSERT INTO setup_keys VALUES('testKeyId','testAccountId','testKey','testK****','existingKey','one-off','2021-08-19 20:46:20.000000000+00:00','2321-09-18 20:46:20.000000000+00:00','2021-08-19 20:46:20.000000000+00:000',0,0,NULL,'["testGroupId"]',1,0); +INSERT INTO peers VALUES('testPeerId','testAccountId','5rvhvriKJZ3S9oxYToVj5TzDM9u9y8cxg7htIMWlYAg=','72546A29-6BC8-4311-BCFC-9CDBF33F1A48','"100.64.114.31"','f2a34f6a4731','linux','Linux','11','unknown','Debian GNU/Linux','','0.12.0','','',NULL,'','','','{"Cloud":"","Platform":""}',NULL,'f2a34f6a4731','f2a34f6a4731','2023-03-02 09:21:02.189035775+01:00',0,0,0,'','ssh-ed25519 
AAAAC3NzaC1lZDI1NTE5AAAAILzUUSYG/LGnV8zarb2SGN+tib/PZ+M7cL4WtTzUrTpk',0,1,'2023-03-01 19:48:19.817799698+01:00','2024-10-02 17:00:32.527947+02:00',0,'""','','',0); \ No newline at end of file diff --git a/management/server/http/testing/testdata/groups.sql b/management/server/http/testing/testdata/groups.sql new file mode 100644 index 000000000..eb874f036 --- /dev/null +++ b/management/server/http/testing/testdata/groups.sql @@ -0,0 +1,19 @@ +CREATE TABLE `accounts` (`id` text,`created_by` text,`created_at` datetime,`domain` text,`domain_category` text,`is_domain_primary_account` numeric,`network_identifier` text,`network_net` text,`network_dns` text,`network_serial` integer,`dns_settings_disabled_management_groups` text,`settings_peer_login_expiration_enabled` numeric,`settings_peer_login_expiration` integer,`settings_regular_users_view_blocked` numeric,`settings_groups_propagation_enabled` numeric,`settings_jwt_groups_enabled` numeric,`settings_jwt_groups_claim_name` text,`settings_jwt_allow_groups` text,`settings_extra_peer_approval_enabled` numeric,`settings_extra_integrated_validator_groups` text,PRIMARY KEY (`id`)); +CREATE TABLE `users` (`id` text,`account_id` text,`role` text,`is_service_user` numeric,`non_deletable` numeric,`service_user_name` text,`auto_groups` text,`blocked` numeric,`last_login` datetime DEFAULT NULL,`created_at` datetime,`issued` text DEFAULT "api",`integration_ref_id` integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_users_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `groups` (`id` text,`account_id` text,`name` text,`issued` text,`peers` text,`integration_ref_id` integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_groups_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `setup_keys` (`id` text,`account_id` text,`key` text,`key_secret` text,`name` text,`type` text,`created_at` datetime,`expires_at` 
datetime,`updated_at` datetime,`revoked` numeric,`used_times` integer,`last_used` datetime DEFAULT NULL,`auto_groups` text,`usage_limit` integer,`ephemeral` numeric,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_setup_keys_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `peers` (`id` text,`account_id` text,`key` text,`setup_key` text,`ip` text,`meta_hostname` text,`meta_go_os` text,`meta_kernel` text,`meta_core` text,`meta_platform` text,`meta_os` text,`meta_os_version` text,`meta_wt_version` text,`meta_ui_version` text,`meta_kernel_version` text,`meta_network_addresses` text,`meta_system_serial_number` text,`meta_system_product_name` text,`meta_system_manufacturer` text,`meta_environment` text,`meta_files` text,`name` text,`dns_label` text,`peer_status_last_seen` datetime,`peer_status_connected` numeric,`peer_status_login_expired` numeric,`peer_status_requires_approval` numeric,`user_id` text,`ssh_key` text,`ssh_enabled` numeric,`login_expiration_enabled` numeric,`last_login` datetime,`created_at` datetime,`ephemeral` numeric,`location_connection_ip` text,`location_country_code` text,`location_city_name` text,`location_geo_name_id` integer,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_peers_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); + +INSERT INTO accounts VALUES('testAccountId','','2024-10-02 16:01:38.000000000+00:00','test.com','private',1,'testNetworkIdentifier','{"IP":"100.64.0.0","Mask":"//8AAA=="}','',0,'[]',1,86400000000000,0,0,0,'',NULL,NULL,NULL); +INSERT INTO users VALUES('testUserId','testAccountId','user',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testAdminId','testAccountId','admin',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testOwnerId','testAccountId','owner',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users 
VALUES('testServiceUserId','testAccountId','user',1,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testServiceAdminId','testAccountId','admin',1,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('blockedUserId','testAccountId','admin',0,0,'','[]',1,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('otherUserId','otherAccountId','admin',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO "groups" VALUES('testGroupId','testAccountId','testGroupName','api','["testPeerId"]',0,''); +INSERT INTO "groups" VALUES('newGroupId','testAccountId','newGroupName','api','[]',0,''); +INSERT INTO "groups" VALUES('allGroupId','testAccountId','All','api','[]',0,''); +INSERT INTO setup_keys VALUES('testKeyId','testAccountId','testKey','testK****','existingKey','one-off','2021-08-19 20:46:20.000000000+00:00','2321-09-18 20:46:20.000000000+00:00','2021-08-19 20:46:20.000000000+00:00',0,0,NULL,'["testGroupId"]',1,0); +INSERT INTO peers VALUES('testPeerId','testAccountId','5rvhvriKJZ3S9oxYToVj5TzDM9u9y8cxg7htIMWlYAg=','72546A29-6BC8-4311-BCFC-9CDBF33F1A48','"100.64.114.31"','f2a34f6a4731','linux','Linux','11','unknown','Debian GNU/Linux','','0.12.0','','',NULL,'','','','{"Cloud":"","Platform":""}',NULL,'f2a34f6a4731','f2a34f6a4731','2023-03-02 09:21:02.189035775+01:00',0,0,0,'','ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILzUUSYG/LGnV8zarb2SGN+tib/PZ+M7cL4WtTzUrTpk',0,1,'2023-03-01 19:48:19.817799698+01:00','2024-10-02 17:00:32.527947+02:00',0,'""','','',0); \ No newline at end of file diff --git a/management/server/http/testing/testdata/networks.sql b/management/server/http/testing/testdata/networks.sql new file mode 100644 index 000000000..39ec8e646 --- /dev/null +++ b/management/server/http/testing/testdata/networks.sql @@ -0,0 +1,25 @@ +CREATE TABLE `accounts` (`id` text,`created_by` text,`created_at` datetime,`domain` 
text,`domain_category` text,`is_domain_primary_account` numeric,`network_identifier` text,`network_net` text,`network_dns` text,`network_serial` integer,`dns_settings_disabled_management_groups` text,`settings_peer_login_expiration_enabled` numeric,`settings_peer_login_expiration` integer,`settings_regular_users_view_blocked` numeric,`settings_groups_propagation_enabled` numeric,`settings_jwt_groups_enabled` numeric,`settings_jwt_groups_claim_name` text,`settings_jwt_allow_groups` text,`settings_extra_peer_approval_enabled` numeric,`settings_extra_integrated_validator_groups` text,PRIMARY KEY (`id`)); +CREATE TABLE `users` (`id` text,`account_id` text,`role` text,`is_service_user` numeric,`non_deletable` numeric,`service_user_name` text,`auto_groups` text,`blocked` numeric,`last_login` datetime DEFAULT NULL,`created_at` datetime,`issued` text DEFAULT "api",`integration_ref_id` integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_users_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `groups` (`id` text,`account_id` text,`name` text,`issued` text,`peers` text,`integration_ref_id` integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_groups_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `setup_keys` (`id` text,`account_id` text,`key` text,`key_secret` text,`name` text,`type` text,`created_at` datetime,`expires_at` datetime,`updated_at` datetime,`revoked` numeric,`used_times` integer,`last_used` datetime DEFAULT NULL,`auto_groups` text,`usage_limit` integer,`ephemeral` numeric,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_setup_keys_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `peers` (`id` text,`account_id` text,`key` text,`setup_key` text,`ip` text,`meta_hostname` text,`meta_go_os` text,`meta_kernel` text,`meta_core` text,`meta_platform` text,`meta_os` text,`meta_os_version` text,`meta_wt_version` 
text,`meta_ui_version` text,`meta_kernel_version` text,`meta_network_addresses` text,`meta_system_serial_number` text,`meta_system_product_name` text,`meta_system_manufacturer` text,`meta_environment` text,`meta_files` text,`name` text,`dns_label` text,`peer_status_last_seen` datetime,`peer_status_connected` numeric,`peer_status_login_expired` numeric,`peer_status_requires_approval` numeric,`user_id` text,`ssh_key` text,`ssh_enabled` numeric,`login_expiration_enabled` numeric,`last_login` datetime,`created_at` datetime,`ephemeral` numeric,`location_connection_ip` text,`location_country_code` text,`location_city_name` text,`location_geo_name_id` integer,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_peers_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `networks` (`id` text,`account_id` text,`name` text,`description` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_networks` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `network_routers` (`id` text,`network_id` text,`account_id` text,`peer` text,`peer_groups` text,`masquerade` numeric,`metric` integer,`enabled` numeric,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_network_routers` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `network_resources` (`id` text,`network_id` text,`account_id` text,`name` text,`description` text,`type` text,`domain` text,`prefix` text,`enabled` numeric,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_network_resources` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); + +INSERT INTO accounts VALUES('testAccountId','','2024-10-02 16:01:38.000000000+00:00','test.com','private',1,'testNetworkIdentifier','{"IP":"100.64.0.0","Mask":"//8AAA=="}','',0,'[]',1,86400000000000,0,0,0,'',NULL,NULL,NULL); +INSERT INTO users VALUES('testUserId','testAccountId','user',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testAdminId','testAccountId','admin',0,0,'','[]',0,NULL,'2024-10-02 
16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testOwnerId','testAccountId','owner',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testServiceUserId','testAccountId','user',1,0,'testServiceUser','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testServiceAdminId','testAccountId','admin',1,0,'testServiceAdmin','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('blockedUserId','testAccountId','admin',0,0,'','[]',1,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('otherUserId','otherAccountId','admin',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO "groups" VALUES('testGroupId','testAccountId','testGroupName','api','["testPeerId"]',0,''); +INSERT INTO "groups" VALUES('newGroupId','testAccountId','newGroupName','api','[]',0,''); +INSERT INTO setup_keys VALUES('testKeyId','testAccountId','testKey','testK****','existingKey','one-off','2021-08-19 20:46:20.000000000+00:00','2321-09-18 20:46:20.000000000+00:00','2021-08-19 20:46:20.000000000+00:00',0,0,NULL,'["testGroupId"]',1,0); +INSERT INTO peers VALUES('testPeerId','testAccountId','5rvhvriKJZ3S9oxYToVj5TzDM9u9y8cxg7htIMWlYAg=','72546A29-6BC8-4311-BCFC-9CDBF33F1A48','"100.64.114.31"','f2a34f6a4731','linux','Linux','11','unknown','Debian GNU/Linux','','0.12.0','','',NULL,'','','','{"Cloud":"","Platform":""}',NULL,'f2a34f6a4731','f2a34f6a4731','2023-03-02 09:21:02.189035775+01:00',0,0,0,'','ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILzUUSYG/LGnV8zarb2SGN+tib/PZ+M7cL4WtTzUrTpk',0,1,'2023-03-01 19:48:19.817799698+01:00','2024-10-02 17:00:32.527947+02:00',0,'""','','',0); + +INSERT INTO networks VALUES('testNetworkId','testAccountId','testNetwork','test network description'); +INSERT INTO network_routers VALUES('testRouterId','testNetworkId','testAccountId','testPeerId','[]',1,100,1); +INSERT INTO network_resources 
VALUES('testResourceId','testNetworkId','testAccountId','testResource','test resource description','host','','"3.3.3.3/32"',1); \ No newline at end of file diff --git a/management/server/http/testing/testdata/peers_integration.sql b/management/server/http/testing/testdata/peers_integration.sql new file mode 100644 index 000000000..62a7760e7 --- /dev/null +++ b/management/server/http/testing/testdata/peers_integration.sql @@ -0,0 +1,20 @@ +CREATE TABLE `accounts` (`id` text,`created_by` text,`created_at` datetime,`domain` text,`domain_category` text,`is_domain_primary_account` numeric,`network_identifier` text,`network_net` text,`network_dns` text,`network_serial` integer,`dns_settings_disabled_management_groups` text,`settings_peer_login_expiration_enabled` numeric,`settings_peer_login_expiration` integer,`settings_regular_users_view_blocked` numeric,`settings_groups_propagation_enabled` numeric,`settings_jwt_groups_enabled` numeric,`settings_jwt_groups_claim_name` text,`settings_jwt_allow_groups` text,`settings_extra_peer_approval_enabled` numeric,`settings_extra_integrated_validator_groups` text,PRIMARY KEY (`id`)); +CREATE TABLE `users` (`id` text,`account_id` text,`role` text,`is_service_user` numeric,`non_deletable` numeric,`service_user_name` text,`auto_groups` text,`blocked` numeric,`last_login` datetime DEFAULT NULL,`created_at` datetime,`issued` text DEFAULT "api",`integration_ref_id` integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_users_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `groups` (`id` text,`account_id` text,`name` text,`issued` text,`peers` text,`integration_ref_id` integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_groups_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `setup_keys` (`id` text,`account_id` text,`key` text,`key_secret` text,`name` text,`type` text,`created_at` datetime,`expires_at` 
datetime,`updated_at` datetime,`revoked` numeric,`used_times` integer,`last_used` datetime DEFAULT NULL,`auto_groups` text,`usage_limit` integer,`ephemeral` numeric,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_setup_keys_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `peers` (`id` text,`account_id` text,`key` text,`setup_key` text,`ip` text,`meta_hostname` text,`meta_go_os` text,`meta_kernel` text,`meta_core` text,`meta_platform` text,`meta_os` text,`meta_os_version` text,`meta_wt_version` text,`meta_ui_version` text,`meta_kernel_version` text,`meta_network_addresses` text,`meta_system_serial_number` text,`meta_system_product_name` text,`meta_system_manufacturer` text,`meta_environment` text,`meta_files` text,`name` text,`dns_label` text,`peer_status_last_seen` datetime,`peer_status_connected` numeric,`peer_status_login_expired` numeric,`peer_status_requires_approval` numeric,`user_id` text,`ssh_key` text,`ssh_enabled` numeric,`login_expiration_enabled` numeric,`last_login` datetime,`created_at` datetime,`ephemeral` numeric,`location_connection_ip` text,`location_country_code` text,`location_city_name` text,`location_geo_name_id` integer,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_peers_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); + +INSERT INTO accounts VALUES('testAccountId','','2024-10-02 16:01:38.000000000+00:00','test.com','private',1,'testNetworkIdentifier','{"IP":"100.64.0.0","Mask":"//8AAA=="}','',0,'[]',0,86400000000000,0,0,0,'',NULL,NULL,NULL); +INSERT INTO users VALUES('testUserId','testAccountId','user',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testAdminId','testAccountId','admin',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testOwnerId','testAccountId','owner',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users 
VALUES('testServiceUserId','testAccountId','user',1,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testServiceAdminId','testAccountId','admin',1,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('blockedUserId','testAccountId','admin',0,0,'','[]',1,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('otherUserId','otherAccountId','admin',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO "groups" VALUES('testGroupId','testAccountId','testGroupName','api','["testPeerId","testPeerId2"]',0,''); +INSERT INTO "groups" VALUES('newGroupId','testAccountId','newGroupName','api','[]',0,''); +INSERT INTO setup_keys VALUES('testKeyId','testAccountId','testKey','testK****','existingKey','one-off','2021-08-19 20:46:20.000000000+00:00','2321-09-18 20:46:20.000000000+00:00','2021-08-19 20:46:20.000000000+00:00',0,0,NULL,'["testGroupId"]',1,0); + +INSERT INTO peers VALUES('testPeerId','testAccountId','5rvhvriKJZ3S9oxYToVj5TzDM9u9y8cxg7htIMWlYAg=','72546A29-6BC8-4311-BCFC-9CDBF33F1A48','"100.64.114.31"','test-host-1','linux','Linux','','unknown','Debian GNU/Linux','','0.12.0','','',NULL,'','','','{"Cloud":"","Platform":""}',NULL,'test-peer-1','test-peer-1','2023-03-02 09:21:02.189035775+01:00',0,0,0,'testUserId','ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILzUUSYG/LGnV8zarb2SGN+tib/PZ+M7cL4WtTzUrTpk',0,1,'2023-03-01 19:48:19.817799698+01:00','2024-10-02 17:00:32.527947+02:00',0,'""','','',0); +INSERT INTO peers VALUES('testPeerId2','testAccountId','6rvhvriKJZ3S9oxYToVj5TzDM9u9y8cxg7htIMWlYBg=','82546A29-6BC8-4311-BCFC-9CDBF33F1A49','"100.64.114.32"','test-host-2','linux','Linux','','unknown','Ubuntu','','0.12.0','','',NULL,'','','','{"Cloud":"","Platform":""}',NULL,'test-peer-2','test-peer-2','2023-03-02 09:21:02.189035775+01:00',1,0,0,'testAdminId','ssh-ed25519 
AAAAC3NzaC1lZDI1NTE5AAAAILzUUSYG/LGnV8zarb2SGN+tib/PZ+M7cL4WtTzUrTpk',1,0,'2023-03-01 19:48:19.817799698+01:00','2024-10-02 17:00:32.527947+02:00',0,'""','','',0); \ No newline at end of file diff --git a/management/server/http/testing/testdata/policies.sql b/management/server/http/testing/testdata/policies.sql new file mode 100644 index 000000000..7e6cc883b --- /dev/null +++ b/management/server/http/testing/testdata/policies.sql @@ -0,0 +1,23 @@ +CREATE TABLE `accounts` (`id` text,`created_by` text,`created_at` datetime,`domain` text,`domain_category` text,`is_domain_primary_account` numeric,`network_identifier` text,`network_net` text,`network_dns` text,`network_serial` integer,`dns_settings_disabled_management_groups` text,`settings_peer_login_expiration_enabled` numeric,`settings_peer_login_expiration` integer,`settings_regular_users_view_blocked` numeric,`settings_groups_propagation_enabled` numeric,`settings_jwt_groups_enabled` numeric,`settings_jwt_groups_claim_name` text,`settings_jwt_allow_groups` text,`settings_extra_peer_approval_enabled` numeric,`settings_extra_integrated_validator_groups` text,PRIMARY KEY (`id`)); +CREATE TABLE `users` (`id` text,`account_id` text,`role` text,`is_service_user` numeric,`non_deletable` numeric,`service_user_name` text,`auto_groups` text,`blocked` numeric,`last_login` datetime DEFAULT NULL,`created_at` datetime,`issued` text DEFAULT "api",`integration_ref_id` integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_users_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `groups` (`id` text,`account_id` text,`name` text,`issued` text,`peers` text,`integration_ref_id` integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_groups_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `setup_keys` (`id` text,`account_id` text,`key` text,`key_secret` text,`name` text,`type` text,`created_at` datetime,`expires_at` 
datetime,`updated_at` datetime,`revoked` numeric,`used_times` integer,`last_used` datetime DEFAULT NULL,`auto_groups` text,`usage_limit` integer,`ephemeral` numeric,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_setup_keys_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `peers` (`id` text,`account_id` text,`key` text,`setup_key` text,`ip` text,`meta_hostname` text,`meta_go_os` text,`meta_kernel` text,`meta_core` text,`meta_platform` text,`meta_os` text,`meta_os_version` text,`meta_wt_version` text,`meta_ui_version` text,`meta_kernel_version` text,`meta_network_addresses` text,`meta_system_serial_number` text,`meta_system_product_name` text,`meta_system_manufacturer` text,`meta_environment` text,`meta_files` text,`name` text,`dns_label` text,`peer_status_last_seen` datetime,`peer_status_connected` numeric,`peer_status_login_expired` numeric,`peer_status_requires_approval` numeric,`user_id` text,`ssh_key` text,`ssh_enabled` numeric,`login_expiration_enabled` numeric,`last_login` datetime,`created_at` datetime,`ephemeral` numeric,`location_connection_ip` text,`location_country_code` text,`location_city_name` text,`location_geo_name_id` integer,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_peers_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `policies` (`id` text,`account_id` text,`name` text,`description` text,`enabled` numeric,`source_posture_checks` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_policies_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `policy_rules` (`id` text,`policy_id` text,`name` text,`description` text,`enabled` numeric,`action` text,`protocol` text,`bidirectional` numeric,`sources` text,`destinations` text,`source_resource` text,`destination_resource` text,`ports` text,`port_ranges` text,`authorized_groups` text,`authorized_user` text,PRIMARY KEY (`id`),CONSTRAINT `fk_policies_rules_g` FOREIGN KEY (`policy_id`) REFERENCES `policies`(`id`)); + +INSERT INTO accounts 
VALUES('testAccountId','','2024-10-02 16:01:38.000000000+00:00','test.com','private',1,'testNetworkIdentifier','{"IP":"100.64.0.0","Mask":"//8AAA=="}','',0,'[]',1,86400000000000,0,0,0,'',NULL,NULL,NULL); +INSERT INTO users VALUES('testUserId','testAccountId','user',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testAdminId','testAccountId','admin',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testOwnerId','testAccountId','owner',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testServiceUserId','testAccountId','user',1,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testServiceAdminId','testAccountId','admin',1,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('blockedUserId','testAccountId','admin',0,0,'','[]',1,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('otherUserId','otherAccountId','admin',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO "groups" VALUES('testGroupId','testAccountId','testGroupName','api','["testPeerId"]',0,''); +INSERT INTO "groups" VALUES('newGroupId','testAccountId','newGroupName','api','[]',0,''); +INSERT INTO setup_keys VALUES('testKeyId','testAccountId','testKey','testK****','existingKey','one-off','2021-08-19 20:46:20.000000000+00:00','2321-09-18 20:46:20.000000000+00:00','2021-08-19 20:46:20.000000000+00:00',0,0,NULL,'["testGroupId"]',1,0); +INSERT INTO peers VALUES('testPeerId','testAccountId','5rvhvriKJZ3S9oxYToVj5TzDM9u9y8cxg7htIMWlYAg=','72546A29-6BC8-4311-BCFC-9CDBF33F1A48','"100.64.114.31"','f2a34f6a4731','linux','Linux','11','unknown','Debian GNU/Linux','','0.12.0','','',NULL,'','','','{"Cloud":"","Platform":""}',NULL,'f2a34f6a4731','f2a34f6a4731','2023-03-02 09:21:02.189035775+01:00',0,0,0,'','ssh-ed25519 
AAAAC3NzaC1lZDI1NTE5AAAAILzUUSYG/LGnV8zarb2SGN+tib/PZ+M7cL4WtTzUrTpk',0,1,'2023-03-01 19:48:19.817799698+01:00','2024-10-02 17:00:32.527947+02:00',0,'""','','',0); + +INSERT INTO policies VALUES('testPolicyId','testAccountId','testPolicy','test policy description',1,NULL); +INSERT INTO policy_rules VALUES('testRuleId','testPolicyId','testRule','test rule',1,'accept','all',1,'["testGroupId"]','["testGroupId"]',NULL,NULL,NULL,NULL,NULL,''); \ No newline at end of file diff --git a/management/server/http/testing/testdata/routes.sql b/management/server/http/testing/testdata/routes.sql new file mode 100644 index 000000000..48aa02052 --- /dev/null +++ b/management/server/http/testing/testdata/routes.sql @@ -0,0 +1,23 @@ +CREATE TABLE `accounts` (`id` text,`created_by` text,`created_at` datetime,`domain` text,`domain_category` text,`is_domain_primary_account` numeric,`network_identifier` text,`network_net` text,`network_dns` text,`network_serial` integer,`dns_settings_disabled_management_groups` text,`settings_peer_login_expiration_enabled` numeric,`settings_peer_login_expiration` integer,`settings_regular_users_view_blocked` numeric,`settings_groups_propagation_enabled` numeric,`settings_jwt_groups_enabled` numeric,`settings_jwt_groups_claim_name` text,`settings_jwt_allow_groups` text,`settings_extra_peer_approval_enabled` numeric,`settings_extra_integrated_validator_groups` text,PRIMARY KEY (`id`)); +CREATE TABLE `users` (`id` text,`account_id` text,`role` text,`is_service_user` numeric,`non_deletable` numeric,`service_user_name` text,`auto_groups` text,`blocked` numeric,`last_login` datetime DEFAULT NULL,`created_at` datetime,`issued` text DEFAULT "api",`integration_ref_id` integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_users_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `groups` (`id` text,`account_id` text,`name` text,`issued` text,`peers` text,`integration_ref_id` 
integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_groups_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `setup_keys` (`id` text,`account_id` text,`key` text,`key_secret` text,`name` text,`type` text,`created_at` datetime,`expires_at` datetime,`updated_at` datetime,`revoked` numeric,`used_times` integer,`last_used` datetime DEFAULT NULL,`auto_groups` text,`usage_limit` integer,`ephemeral` numeric,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_setup_keys_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `peers` (`id` text,`account_id` text,`key` text,`setup_key` text,`ip` text,`meta_hostname` text,`meta_go_os` text,`meta_kernel` text,`meta_core` text,`meta_platform` text,`meta_os` text,`meta_os_version` text,`meta_wt_version` text,`meta_ui_version` text,`meta_kernel_version` text,`meta_network_addresses` text,`meta_system_serial_number` text,`meta_system_product_name` text,`meta_system_manufacturer` text,`meta_environment` text,`meta_files` text,`name` text,`dns_label` text,`peer_status_last_seen` datetime,`peer_status_connected` numeric,`peer_status_login_expired` numeric,`peer_status_requires_approval` numeric,`user_id` text,`ssh_key` text,`ssh_enabled` numeric,`login_expiration_enabled` numeric,`last_login` datetime,`created_at` datetime,`ephemeral` numeric,`location_connection_ip` text,`location_country_code` text,`location_city_name` text,`location_geo_name_id` integer,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_peers_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `routes` (`id` text,`account_id` text,`network` text,`domains` text,`keep_route` numeric,`net_id` text,`description` text,`peer` text,`peer_groups` text,`network_type` integer,`masquerade` numeric,`metric` integer,`enabled` numeric,`groups` text,`access_control_groups` text,`skip_auto_apply` numeric,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_routes_g` FOREIGN KEY (`account_id`) REFERENCES 
`accounts`(`id`)); + +INSERT INTO accounts VALUES('testAccountId','','2024-10-02 16:01:38.000000000+00:00','test.com','private',1,'testNetworkIdentifier','{"IP":"100.64.0.0","Mask":"//8AAA=="}','',0,'[]',1,86400000000000,0,0,0,'',NULL,NULL,NULL); +INSERT INTO users VALUES('testUserId','testAccountId','user',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testAdminId','testAccountId','admin',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testOwnerId','testAccountId','owner',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testServiceUserId','testAccountId','user',1,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testServiceAdminId','testAccountId','admin',1,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('blockedUserId','testAccountId','admin',0,0,'','[]',1,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('otherUserId','otherAccountId','admin',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO "groups" VALUES('testGroupId','testAccountId','testGroupName','api','["testPeerId"]',0,''); +INSERT INTO "groups" VALUES('newGroupId','testAccountId','newGroupName','api','[]',0,''); +INSERT INTO "groups" VALUES('peerGroupId','testAccountId','peerGroupName','api','["testPeerId"]',0,''); +INSERT INTO setup_keys VALUES('testKeyId','testAccountId','testKey','testK****','existingKey','one-off','2021-08-19 20:46:20.000000000+00:00','2321-09-18 20:46:20.000000000+00:00','2021-08-19 20:46:20.000000000+00:00',0,0,NULL,'["testGroupId"]',1,0); +INSERT INTO peers VALUES('testPeerId','testAccountId','5rvhvriKJZ3S9oxYToVj5TzDM9u9y8cxg7htIMWlYAg=','72546A29-6BC8-4311-BCFC-9CDBF33F1A48','"100.64.114.31"','f2a34f6a4731','linux','Linux','11','unknown','Debian 
GNU/Linux','','0.12.0','','',NULL,'','','','{"Cloud":"","Platform":""}',NULL,'f2a34f6a4731','f2a34f6a4731','2023-03-02 09:21:02.189035775+01:00',0,0,0,'','ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILzUUSYG/LGnV8zarb2SGN+tib/PZ+M7cL4WtTzUrTpk',0,1,'2023-03-01 19:48:19.817799698+01:00','2024-10-02 17:00:32.527947+02:00',0,'""','','',0); + +INSERT INTO routes VALUES('testRouteId','testAccountId','"10.0.0.0/24"',NULL,0,'testNet','Test Network Route','testPeerId',NULL,1,1,100,1,'["testGroupId"]',NULL,0); +INSERT INTO routes VALUES('testDomainRouteId','testAccountId','"0.0.0.0/0"','["example.com"]',0,'testDomainNet','Test Domain Route','','["peerGroupId"]',3,1,200,1,'["testGroupId"]',NULL,0); diff --git a/management/server/http/testing/testdata/users_integration.sql b/management/server/http/testing/testdata/users_integration.sql new file mode 100644 index 000000000..57df73e8c --- /dev/null +++ b/management/server/http/testing/testdata/users_integration.sql @@ -0,0 +1,24 @@ +CREATE TABLE `accounts` (`id` text,`created_by` text,`created_at` datetime,`domain` text,`domain_category` text,`is_domain_primary_account` numeric,`network_identifier` text,`network_net` text,`network_dns` text,`network_serial` integer,`dns_settings_disabled_management_groups` text,`settings_peer_login_expiration_enabled` numeric,`settings_peer_login_expiration` integer,`settings_regular_users_view_blocked` numeric,`settings_groups_propagation_enabled` numeric,`settings_jwt_groups_enabled` numeric,`settings_jwt_groups_claim_name` text,`settings_jwt_allow_groups` text,`settings_extra_peer_approval_enabled` numeric,`settings_extra_integrated_validator_groups` text,PRIMARY KEY (`id`)); +CREATE TABLE `users` (`id` text,`account_id` text,`role` text,`is_service_user` numeric,`non_deletable` numeric,`service_user_name` text,`auto_groups` text,`blocked` numeric,`last_login` datetime DEFAULT NULL,`created_at` datetime,`issued` text DEFAULT "api",`integration_ref_id` integer,`integration_ref_integration_type` 
text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_users_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `groups` (`id` text,`account_id` text,`name` text,`issued` text,`peers` text,`integration_ref_id` integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_groups_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `setup_keys` (`id` text,`account_id` text,`key` text,`key_secret` text,`name` text,`type` text,`created_at` datetime,`expires_at` datetime,`updated_at` datetime,`revoked` numeric,`used_times` integer,`last_used` datetime DEFAULT NULL,`auto_groups` text,`usage_limit` integer,`ephemeral` numeric,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_setup_keys_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `peers` (`id` text,`account_id` text,`key` text,`setup_key` text,`ip` text,`meta_hostname` text,`meta_go_os` text,`meta_kernel` text,`meta_core` text,`meta_platform` text,`meta_os` text,`meta_os_version` text,`meta_wt_version` text,`meta_ui_version` text,`meta_kernel_version` text,`meta_network_addresses` text,`meta_system_serial_number` text,`meta_system_product_name` text,`meta_system_manufacturer` text,`meta_environment` text,`meta_files` text,`name` text,`dns_label` text,`peer_status_last_seen` datetime,`peer_status_connected` numeric,`peer_status_login_expired` numeric,`peer_status_requires_approval` numeric,`user_id` text,`ssh_key` text,`ssh_enabled` numeric,`login_expiration_enabled` numeric,`last_login` datetime,`created_at` datetime,`ephemeral` numeric,`location_connection_ip` text,`location_country_code` text,`location_city_name` text,`location_geo_name_id` integer,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_peers_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`)); +CREATE TABLE `personal_access_tokens` (`id` text,`user_id` text,`name` text,`hashed_token` text,`expiration_date` datetime,`created_by` text,`created_at` datetime,`last_used` 
datetime DEFAULT NULL,PRIMARY KEY (`id`),CONSTRAINT `fk_users_pa_ts_g` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`)); +CREATE INDEX `idx_personal_access_tokens_user_id` ON `personal_access_tokens`(`user_id`); + +INSERT INTO accounts VALUES('testAccountId','','2024-10-02 16:01:38.000000000+00:00','test.com','private',1,'testNetworkIdentifier','{"IP":"100.64.0.0","Mask":"//8AAA=="}','',0,'[]',1,86400000000000,0,0,0,'',NULL,NULL,NULL); +INSERT INTO users VALUES('testUserId','testAccountId','user',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testAdminId','testAccountId','admin',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testOwnerId','testAccountId','owner',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testServiceUserId','testAccountId','user',1,0,'testServiceUser','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('testServiceAdminId','testAccountId','admin',1,0,'testServiceAdmin','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('blockedUserId','testAccountId','admin',0,0,'','[]',1,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('otherUserId','otherAccountId','admin',0,0,'','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO users VALUES('deletableServiceUserId','testAccountId','user',1,0,'deletableServiceUser','[]',0,NULL,'2024-10-02 16:01:38.000000000+00:00','api',0,''); +INSERT INTO "groups" VALUES('testGroupId','testAccountId','testGroupName','api','["testPeerId"]',0,''); +INSERT INTO "groups" VALUES('newGroupId','testAccountId','newGroupName','api','[]',0,''); +INSERT INTO setup_keys VALUES('testKeyId','testAccountId','testKey','testK****','existingKey','one-off','2021-08-19 20:46:20.000000000+00:00','2321-09-18 20:46:20.000000000+00:00','2021-08-19 
20:46:20.000000000+00:00',0,0,NULL,'["testGroupId"]',1,0); +INSERT INTO peers VALUES('testPeerId','testAccountId','5rvhvriKJZ3S9oxYToVj5TzDM9u9y8cxg7htIMWlYAg=','72546A29-6BC8-4311-BCFC-9CDBF33F1A48','"100.64.114.31"','f2a34f6a4731','linux','Linux','11','unknown','Debian GNU/Linux','','0.12.0','','',NULL,'','','','{"Cloud":"","Platform":""}',NULL,'f2a34f6a4731','f2a34f6a4731','2023-03-02 09:21:02.189035775+01:00',0,0,0,'','ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILzUUSYG/LGnV8zarb2SGN+tib/PZ+M7cL4WtTzUrTpk',0,1,'2023-03-01 19:48:19.817799698+01:00','2024-10-02 17:00:32.527947+02:00',0,'""','','',0); + +INSERT INTO personal_access_tokens VALUES('testTokenId','testUserId','testToken','hashedTokenValue123','2325-10-02 16:01:38.000000000+00:00','testUserId','2024-10-02 16:01:38.000000000+00:00',NULL); +INSERT INTO personal_access_tokens VALUES('serviceTokenId','testServiceUserId','serviceToken','hashedServiceTokenValue123','2325-10-02 16:01:38.000000000+00:00','testAdminId','2024-10-02 16:01:38.000000000+00:00',NULL); \ No newline at end of file diff --git a/management/server/http/testing/testing_tools/channel/channel.go b/management/server/http/testing/testing_tools/channel/channel.go index 2fd414da5..819bd2579 100644 --- a/management/server/http/testing/testing_tools/channel/channel.go +++ b/management/server/http/testing/testing_tools/channel/channel.go @@ -114,13 +114,12 @@ func BuildApiBlackBoxWithDBState(t testing_tools.TB, sqlFile string, expectedPee if err != nil { t.Fatalf("Failed to create proxy controller: %v", err) } - domainManager.SetClusterCapabilities(serviceProxyController) - serviceManager := reverseproxymanager.NewManager(store, am, permissionsManager, serviceProxyController, domainManager) + serviceManager := reverseproxymanager.NewManager(store, am, permissionsManager, serviceProxyController, proxyMgr, domainManager) proxyServiceServer.SetServiceManager(serviceManager) am.SetServiceManager(serviceManager) // @note this is required so that PAT's 
validate from store, but JWT's are mocked - authManager := serverauth.NewManager(store, "", "", "", "", []string{}, false) + authManager := serverauth.NewManager(store, "", "", "", "", []string{}, false, nil) authManagerMock := &serverauth.MockManager{ ValidateAndParseTokenFunc: mockValidateAndParseToken, EnsureUserAccessByJWTGroupsFunc: authManager.EnsureUserAccessByJWTGroups, @@ -128,14 +127,14 @@ func BuildApiBlackBoxWithDBState(t testing_tools.TB, sqlFile string, expectedPee GetPATInfoFunc: authManager.GetPATInfo, } - networksManagerMock := networks.NewManagerMock() - resourcesManagerMock := resources.NewManagerMock() - routersManagerMock := routers.NewManagerMock() - groupsManagerMock := groups.NewManagerMock() + groupsManager := groups.NewManager(store, permissionsManager, am) + routersManager := routers.NewManager(store, permissionsManager, am) + resourcesManager := resources.NewManager(store, permissionsManager, groupsManager, am, serviceManager) + networksManager := networks.NewManager(store, permissionsManager, resourcesManager, routersManager, am) customZonesManager := zonesManager.NewManager(store, am, permissionsManager, "") zoneRecordsManager := recordsManager.NewManager(store, am, permissionsManager) - apiHandler, err := http2.NewAPIHandler(context.Background(), am, networksManagerMock, resourcesManagerMock, routersManagerMock, groupsManagerMock, geoMock, authManagerMock, metrics, validatorMock, proxyController, permissionsManager, peersManager, settingsManager, customZonesManager, zoneRecordsManager, networkMapController, nil, serviceManager, nil, nil, nil, nil) + apiHandler, err := http2.NewAPIHandler(context.Background(), am, networksManager, resourcesManager, routersManager, groupsManager, geoMock, authManagerMock, metrics, validatorMock, proxyController, permissionsManager, peersManager, settingsManager, customZonesManager, zoneRecordsManager, networkMapController, nil, serviceManager, nil, nil, nil, nil) if err != nil { t.Fatalf("Failed to 
create API handler: %v", err) } @@ -167,6 +166,111 @@ func peerShouldReceiveUpdate(t testing_tools.TB, updateMessage <-chan *network_m } } +// PeerShouldReceiveAnyUpdate waits for a peer update message and returns it. +// Fails the test if no update is received within timeout. +func PeerShouldReceiveAnyUpdate(t testing_tools.TB, updateMessage <-chan *network_map.UpdateMessage) *network_map.UpdateMessage { + t.Helper() + select { + case msg := <-updateMessage: + if msg == nil { + t.Errorf("Received nil update message, expected valid message") + } + return msg + case <-time.After(500 * time.Millisecond): + t.Errorf("Timed out waiting for update message") + return nil + } +} + +// PeerShouldNotReceiveAnyUpdate verifies no peer update message is received. +func PeerShouldNotReceiveAnyUpdate(t testing_tools.TB, updateMessage <-chan *network_map.UpdateMessage) { + t.Helper() + peerShouldNotReceiveUpdate(t, updateMessage) +} + +// BuildApiBlackBoxWithDBStateAndPeerChannel creates the API handler and returns +// the peer update channel directly so tests can verify updates inline. 
+func BuildApiBlackBoxWithDBStateAndPeerChannel(t testing_tools.TB, sqlFile string) (http.Handler, account.Manager, <-chan *network_map.UpdateMessage) { + store, cleanup, err := store.NewTestStoreFromSQL(context.Background(), sqlFile, t.TempDir()) + if err != nil { + t.Fatalf("Failed to create test store: %v", err) + } + t.Cleanup(cleanup) + + metrics, err := telemetry.NewDefaultAppMetrics(context.Background()) + if err != nil { + t.Fatalf("Failed to create metrics: %v", err) + } + + peersUpdateManager := update_channel.NewPeersUpdateManager(nil) + updMsg := peersUpdateManager.CreateChannel(context.Background(), testing_tools.TestPeerId) + + geoMock := &geolocation.Mock{} + validatorMock := server.MockIntegratedValidator{} + proxyController := integrations.NewController(store) + userManager := users.NewManager(store) + permissionsManager := permissions.NewManager(store) + settingsManager := settings.NewManager(store, userManager, integrations.NewManager(&activity.InMemoryEventStore{}), permissionsManager, settings.IdpConfig{}) + peersManager := peers.NewManager(store, permissionsManager) + + jobManager := job.NewJobManager(nil, store, peersManager) + + ctx := context.Background() + requestBuffer := server.NewAccountRequestBuffer(ctx, store) + networkMapController := controller.NewController(ctx, store, metrics, peersUpdateManager, requestBuffer, server.MockIntegratedValidator{}, settingsManager, "", port_forwarding.NewControllerMock(), ephemeral_manager.NewEphemeralManager(store, peersManager), &config.Config{}) + am, err := server.BuildManager(ctx, nil, store, networkMapController, jobManager, nil, "", &activity.InMemoryEventStore{}, geoMock, false, validatorMock, metrics, proxyController, settingsManager, permissionsManager, false) + if err != nil { + t.Fatalf("Failed to create manager: %v", err) + } + + accessLogsManager := accesslogsmanager.NewManager(store, permissionsManager, nil) + proxyTokenStore, err := nbgrpc.NewOneTimeTokenStore(ctx, 5*time.Minute, 
10*time.Minute, 100) + if err != nil { + t.Fatalf("Failed to create proxy token store: %v", err) + } + pkceverifierStore, err := nbgrpc.NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100) + if err != nil { + t.Fatalf("Failed to create PKCE verifier store: %v", err) + } + noopMeter := noop.NewMeterProvider().Meter("") + proxyMgr, err := proxymanager.NewManager(store, noopMeter) + if err != nil { + t.Fatalf("Failed to create proxy manager: %v", err) + } + proxyServiceServer := nbgrpc.NewProxyServiceServer(accessLogsManager, proxyTokenStore, pkceverifierStore, nbgrpc.ProxyOIDCConfig{}, peersManager, userManager, proxyMgr, nil) + domainManager := manager.NewManager(store, proxyMgr, permissionsManager, am) + serviceProxyController, err := proxymanager.NewGRPCController(proxyServiceServer, noopMeter) + if err != nil { + t.Fatalf("Failed to create proxy controller: %v", err) + } + serviceManager := reverseproxymanager.NewManager(store, am, permissionsManager, serviceProxyController, proxyMgr, domainManager) + proxyServiceServer.SetServiceManager(serviceManager) + am.SetServiceManager(serviceManager) + + // @note this is required so that PAT's validate from store, but JWT's are mocked + authManager := serverauth.NewManager(store, "", "", "", "", []string{}, false, nil) + authManagerMock := &serverauth.MockManager{ + ValidateAndParseTokenFunc: mockValidateAndParseToken, + EnsureUserAccessByJWTGroupsFunc: authManager.EnsureUserAccessByJWTGroups, + MarkPATUsedFunc: authManager.MarkPATUsed, + GetPATInfoFunc: authManager.GetPATInfo, + } + + groupsManager := groups.NewManager(store, permissionsManager, am) + routersManager := routers.NewManager(store, permissionsManager, am) + resourcesManager := resources.NewManager(store, permissionsManager, groupsManager, am, serviceManager) + networksManager := networks.NewManager(store, permissionsManager, resourcesManager, routersManager, am) + customZonesManager := zonesManager.NewManager(store, am, permissionsManager, "") + 
zoneRecordsManager := recordsManager.NewManager(store, am, permissionsManager) + + apiHandler, err := http2.NewAPIHandler(context.Background(), am, networksManager, resourcesManager, routersManager, groupsManager, geoMock, authManagerMock, metrics, validatorMock, proxyController, permissionsManager, peersManager, settingsManager, customZonesManager, zoneRecordsManager, networkMapController, nil, serviceManager, nil, nil, nil, nil) + if err != nil { + t.Fatalf("Failed to create API handler: %v", err) + } + + return apiHandler, am, updMsg +} + func mockValidateAndParseToken(_ context.Context, token string) (auth.UserAuth, *jwt.Token, error) { userAuth := auth.UserAuth{} diff --git a/management/server/http/testing/testing_tools/db_verify.go b/management/server/http/testing/testing_tools/db_verify.go new file mode 100644 index 000000000..f8af6a41f --- /dev/null +++ b/management/server/http/testing/testing_tools/db_verify.go @@ -0,0 +1,222 @@ +package testing_tools + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gorm.io/gorm" + + nbdns "github.com/netbirdio/netbird/dns" + resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" + routerTypes "github.com/netbirdio/netbird/management/server/networks/routers/types" + networkTypes "github.com/netbirdio/netbird/management/server/networks/types" + nbpeer "github.com/netbirdio/netbird/management/server/peer" + "github.com/netbirdio/netbird/management/server/store" + "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/route" +) + +// GetDB extracts the *gorm.DB from a store.Store (must be *SqlStore). +func GetDB(t *testing.T, s store.Store) *gorm.DB { + t.Helper() + sqlStore, ok := s.(*store.SqlStore) + require.True(t, ok, "Store is not a *SqlStore, cannot get gorm.DB") + return sqlStore.GetDB() +} + +// VerifyGroupInDB reads a group directly from the DB and returns it. 
+func VerifyGroupInDB(t *testing.T, db *gorm.DB, groupID string) *types.Group { + t.Helper() + var group types.Group + err := db.Where("id = ? AND account_id = ?", groupID, TestAccountId).First(&group).Error + require.NoError(t, err, "Expected group %s to exist in DB", groupID) + return &group +} + +// VerifyGroupNotInDB verifies that a group does not exist in the DB. +func VerifyGroupNotInDB(t *testing.T, db *gorm.DB, groupID string) { + t.Helper() + var count int64 + db.Model(&types.Group{}).Where("id = ? AND account_id = ?", groupID, TestAccountId).Count(&count) + assert.Equal(t, int64(0), count, "Expected group %s to NOT exist in DB", groupID) +} + +// VerifyPolicyInDB reads a policy directly from the DB and returns it. +func VerifyPolicyInDB(t *testing.T, db *gorm.DB, policyID string) *types.Policy { + t.Helper() + var policy types.Policy + err := db.Preload("Rules").Where("id = ? AND account_id = ?", policyID, TestAccountId).First(&policy).Error + require.NoError(t, err, "Expected policy %s to exist in DB", policyID) + return &policy +} + +// VerifyPolicyNotInDB verifies that a policy does not exist in the DB. +func VerifyPolicyNotInDB(t *testing.T, db *gorm.DB, policyID string) { + t.Helper() + var count int64 + db.Model(&types.Policy{}).Where("id = ? AND account_id = ?", policyID, TestAccountId).Count(&count) + assert.Equal(t, int64(0), count, "Expected policy %s to NOT exist in DB", policyID) +} + +// VerifyRouteInDB reads a route directly from the DB and returns it. +func VerifyRouteInDB(t *testing.T, db *gorm.DB, routeID route.ID) *route.Route { + t.Helper() + var r route.Route + err := db.Where("id = ? AND account_id = ?", routeID, TestAccountId).First(&r).Error + require.NoError(t, err, "Expected route %s to exist in DB", routeID) + return &r +} + +// VerifyRouteNotInDB verifies that a route does not exist in the DB. 
+func VerifyRouteNotInDB(t *testing.T, db *gorm.DB, routeID route.ID) { + t.Helper() + var count int64 + db.Model(&route.Route{}).Where("id = ? AND account_id = ?", routeID, TestAccountId).Count(&count) + assert.Equal(t, int64(0), count, "Expected route %s to NOT exist in DB", routeID) +} + +// VerifyNSGroupInDB reads a nameserver group directly from the DB and returns it. +func VerifyNSGroupInDB(t *testing.T, db *gorm.DB, nsGroupID string) *nbdns.NameServerGroup { + t.Helper() + var nsGroup nbdns.NameServerGroup + err := db.Where("id = ? AND account_id = ?", nsGroupID, TestAccountId).First(&nsGroup).Error + require.NoError(t, err, "Expected NS group %s to exist in DB", nsGroupID) + return &nsGroup +} + +// VerifyNSGroupNotInDB verifies that a nameserver group does not exist in the DB. +func VerifyNSGroupNotInDB(t *testing.T, db *gorm.DB, nsGroupID string) { + t.Helper() + var count int64 + db.Model(&nbdns.NameServerGroup{}).Where("id = ? AND account_id = ?", nsGroupID, TestAccountId).Count(&count) + assert.Equal(t, int64(0), count, "Expected NS group %s to NOT exist in DB", nsGroupID) +} + +// VerifyPeerInDB reads a peer directly from the DB and returns it. +func VerifyPeerInDB(t *testing.T, db *gorm.DB, peerID string) *nbpeer.Peer { + t.Helper() + var peer nbpeer.Peer + err := db.Where("id = ? AND account_id = ?", peerID, TestAccountId).First(&peer).Error + require.NoError(t, err, "Expected peer %s to exist in DB", peerID) + return &peer +} + +// VerifyPeerNotInDB verifies that a peer does not exist in the DB. +func VerifyPeerNotInDB(t *testing.T, db *gorm.DB, peerID string) { + t.Helper() + var count int64 + db.Model(&nbpeer.Peer{}).Where("id = ? AND account_id = ?", peerID, TestAccountId).Count(&count) + assert.Equal(t, int64(0), count, "Expected peer %s to NOT exist in DB", peerID) +} + +// VerifySetupKeyInDB reads a setup key directly from the DB and returns it. 
+func VerifySetupKeyInDB(t *testing.T, db *gorm.DB, keyID string) *types.SetupKey { + t.Helper() + var key types.SetupKey + err := db.Where("id = ? AND account_id = ?", keyID, TestAccountId).First(&key).Error + require.NoError(t, err, "Expected setup key %s to exist in DB", keyID) + return &key +} + +// VerifySetupKeyNotInDB verifies that a setup key does not exist in the DB. +func VerifySetupKeyNotInDB(t *testing.T, db *gorm.DB, keyID string) { + t.Helper() + var count int64 + db.Model(&types.SetupKey{}).Where("id = ? AND account_id = ?", keyID, TestAccountId).Count(&count) + assert.Equal(t, int64(0), count, "Expected setup key %s to NOT exist in DB", keyID) +} + +// VerifyUserInDB reads a user directly from the DB and returns it. +func VerifyUserInDB(t *testing.T, db *gorm.DB, userID string) *types.User { + t.Helper() + var user types.User + err := db.Where("id = ? AND account_id = ?", userID, TestAccountId).First(&user).Error + require.NoError(t, err, "Expected user %s to exist in DB", userID) + return &user +} + +// VerifyUserNotInDB verifies that a user does not exist in the DB. +func VerifyUserNotInDB(t *testing.T, db *gorm.DB, userID string) { + t.Helper() + var count int64 + db.Model(&types.User{}).Where("id = ? AND account_id = ?", userID, TestAccountId).Count(&count) + assert.Equal(t, int64(0), count, "Expected user %s to NOT exist in DB", userID) +} + +// VerifyPATInDB reads a PAT directly from the DB and returns it. +func VerifyPATInDB(t *testing.T, db *gorm.DB, tokenID string) *types.PersonalAccessToken { + t.Helper() + var pat types.PersonalAccessToken + err := db.Where("id = ?", tokenID).First(&pat).Error + require.NoError(t, err, "Expected PAT %s to exist in DB", tokenID) + return &pat +} + +// VerifyPATNotInDB verifies that a PAT does not exist in the DB. 
+func VerifyPATNotInDB(t *testing.T, db *gorm.DB, tokenID string) { + t.Helper() + var count int64 + db.Model(&types.PersonalAccessToken{}).Where("id = ?", tokenID).Count(&count) + assert.Equal(t, int64(0), count, "Expected PAT %s to NOT exist in DB", tokenID) +} + +// VerifyAccountSettings reads the account and returns its settings from the DB. +func VerifyAccountSettings(t *testing.T, db *gorm.DB) *types.Account { + t.Helper() + var account types.Account + err := db.Where("id = ?", TestAccountId).First(&account).Error + require.NoError(t, err, "Expected account %s to exist in DB", TestAccountId) + return &account +} + +// VerifyNetworkInDB reads a network directly from the store and returns it. +func VerifyNetworkInDB(t *testing.T, db *gorm.DB, networkID string) *networkTypes.Network { + t.Helper() + var network networkTypes.Network + err := db.Where("id = ? AND account_id = ?", networkID, TestAccountId).First(&network).Error + require.NoError(t, err, "Expected network %s to exist in DB", networkID) + return &network +} + +// VerifyNetworkNotInDB verifies that a network does not exist in the DB. +func VerifyNetworkNotInDB(t *testing.T, db *gorm.DB, networkID string) { + t.Helper() + var count int64 + db.Model(&networkTypes.Network{}).Where("id = ? AND account_id = ?", networkID, TestAccountId).Count(&count) + assert.Equal(t, int64(0), count, "Expected network %s to NOT exist in DB", networkID) +} + +// VerifyNetworkResourceInDB reads a network resource directly from the DB and returns it. +func VerifyNetworkResourceInDB(t *testing.T, db *gorm.DB, resourceID string) *resourceTypes.NetworkResource { + t.Helper() + var resource resourceTypes.NetworkResource + err := db.Where("id = ? AND account_id = ?", resourceID, TestAccountId).First(&resource).Error + require.NoError(t, err, "Expected network resource %s to exist in DB", resourceID) + return &resource +} + +// VerifyNetworkResourceNotInDB verifies that a network resource does not exist in the DB. 
+func VerifyNetworkResourceNotInDB(t *testing.T, db *gorm.DB, resourceID string) { + t.Helper() + var count int64 + db.Model(&resourceTypes.NetworkResource{}).Where("id = ? AND account_id = ?", resourceID, TestAccountId).Count(&count) + assert.Equal(t, int64(0), count, "Expected network resource %s to NOT exist in DB", resourceID) +} + +// VerifyNetworkRouterInDB reads a network router directly from the DB and returns it. +func VerifyNetworkRouterInDB(t *testing.T, db *gorm.DB, routerID string) *routerTypes.NetworkRouter { + t.Helper() + var router routerTypes.NetworkRouter + err := db.Where("id = ? AND account_id = ?", routerID, TestAccountId).First(&router).Error + require.NoError(t, err, "Expected network router %s to exist in DB", routerID) + return &router +} + +// VerifyNetworkRouterNotInDB verifies that a network router does not exist in the DB. +func VerifyNetworkRouterNotInDB(t *testing.T, db *gorm.DB, routerID string) { + t.Helper() + var count int64 + db.Model(&routerTypes.NetworkRouter{}).Where("id = ? AND account_id = ?", routerID, TestAccountId).Count(&count) + assert.Equal(t, int64(0), count, "Expected network router %s to NOT exist in DB", routerID) +} diff --git a/management/server/idp/embedded.go b/management/server/idp/embedded.go index 2cc7b9743..48d3221cc 100644 --- a/management/server/idp/embedded.go +++ b/management/server/idp/embedded.go @@ -13,6 +13,7 @@ import ( "github.com/netbirdio/netbird/idp/dex" "github.com/netbirdio/netbird/management/server/telemetry" + nbjwt "github.com/netbirdio/netbird/shared/auth/jwt" ) const ( @@ -48,6 +49,8 @@ type EmbeddedIdPConfig struct { // Existing local users are preserved and will be able to login again if re-enabled. // Cannot be enabled if no external identity provider connectors are configured. 
LocalAuthDisabled bool + // StaticConnectors are additional connectors to seed during initialization + StaticConnectors []dex.Connector } // EmbeddedStorageConfig holds storage configuration for the embedded IdP. @@ -157,6 +160,7 @@ func (c *EmbeddedIdPConfig) ToYAMLConfig() (*dex.YAMLConfig, error) { RedirectURIs: cliRedirectURIs, }, }, + StaticConnectors: c.StaticConnectors, } // Add owner user if provided @@ -193,6 +197,9 @@ type OAuthConfigProvider interface { // Management server has embedded Dex and can validate tokens via localhost, // avoiding external network calls and DNS resolution issues during startup. GetLocalKeysLocation() string + // GetKeyFetcher returns a KeyFetcher that reads keys directly from the IDP storage, + // or nil if direct key fetching is not supported (falls back to HTTP). + GetKeyFetcher() nbjwt.KeyFetcher GetClientIDs() []string GetUserIDClaim() string GetTokenEndpoint() string @@ -593,6 +600,11 @@ func (m *EmbeddedIdPManager) GetCLIRedirectURLs() []string { return m.config.CLIRedirectURIs } +// GetKeyFetcher returns a KeyFetcher that reads keys directly from Dex storage. +func (m *EmbeddedIdPManager) GetKeyFetcher() nbjwt.KeyFetcher { + return m.provider.GetJWKS +} + // GetKeysLocation returns the JWKS endpoint URL for token validation. func (m *EmbeddedIdPManager) GetKeysLocation() string { return m.provider.GetKeysLocation() diff --git a/management/server/idp/migration/migration.go b/management/server/idp/migration/migration.go new file mode 100644 index 000000000..01cadb86d --- /dev/null +++ b/management/server/idp/migration/migration.go @@ -0,0 +1,235 @@ +// Package migration provides utility functions for migrating from the external IdP solution in pre v0.62.0 +// to the new embedded IdP manager (Dex based), which is the default in v0.62.0 and later. +// It includes functions to seed connectors and migrate existing users to use these connectors. 
+package migration + +import ( + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "os" + + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/idp/dex" + "github.com/netbirdio/netbird/management/server/idp" + "github.com/netbirdio/netbird/management/server/types" +) + +// Server is the dependency interface that migration functions use to access +// the main data store and the activity event store. +type Server interface { + Store() Store + EventStore() EventStore // may return nil +} + +const idpSeedInfoKey = "IDP_SEED_INFO" +const dryRunEnvKey = "NB_IDP_MIGRATION_DRY_RUN" + +func isDryRun() bool { + return os.Getenv(dryRunEnvKey) == "true" +} + +var ErrNoSeedInfo = errors.New("no seed info found in environment") + +// SeedConnectorFromEnv reads the IDP_SEED_INFO env var, base64-decodes it, +// and JSON-unmarshals it into a dex.Connector. Returns nil if not set. +func SeedConnectorFromEnv() (*dex.Connector, error) { + val, ok := os.LookupEnv(idpSeedInfoKey) + if !ok || val == "" { + return nil, ErrNoSeedInfo + } + + decoded, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return nil, fmt.Errorf("base64 decode: %w", err) + } + + var conn dex.Connector + if err := json.Unmarshal(decoded, &conn); err != nil { + return nil, fmt.Errorf("json unmarshal: %w", err) + } + + return &conn, nil +} + +// MigrateUsersToStaticConnectors re-keys every user ID in the main store (and +// the activity store, if present) so that it encodes the given connector ID, +// skipping users that have already been migrated. Set NB_IDP_MIGRATION_DRY_RUN=true +// to log what would happen without writing any changes. 
+func MigrateUsersToStaticConnectors(s Server, conn *dex.Connector) error { + ctx := context.Background() + + if isDryRun() { + log.Info("[DRY RUN] migration dry-run mode enabled, no changes will be written") + } + + users, err := s.Store().ListUsers(ctx) + if err != nil { + return fmt.Errorf("failed to list users: %w", err) + } + + // Reconciliation pass: fix activity store for users already migrated in main DB + // but whose activity references may still use old IDs (from a previous partial failure). + if s.EventStore() != nil && !isDryRun() { + if err := reconcileActivityStore(ctx, s.EventStore(), users); err != nil { + return err + } + } + + var migratedCount, skippedCount int + + for _, user := range users { + _, _, decErr := dex.DecodeDexUserID(user.Id) + if decErr == nil { + skippedCount++ + continue + } + + newUserID := dex.EncodeDexUserID(user.Id, conn.ID) + + if isDryRun() { + log.Infof("[DRY RUN] would migrate user %s -> %s (account: %s)", user.Id, newUserID, user.AccountID) + migratedCount++ + continue + } + + if err := migrateUser(ctx, s, user.Id, user.AccountID, newUserID); err != nil { + return err + } + + migratedCount++ + } + + if isDryRun() { + log.Infof("[DRY RUN] migration summary: %d users would be migrated, %d already migrated", migratedCount, skippedCount) + } else { + log.Infof("migration complete: %d users migrated, %d already migrated", migratedCount, skippedCount) + } + + return nil +} + +// reconcileActivityStore updates activity store references for users already migrated +// in the main DB whose activity entries may still use old IDs from a previous partial failure. 
+func reconcileActivityStore(ctx context.Context, eventStore EventStore, users []*types.User) error { + for _, user := range users { + originalID, _, err := dex.DecodeDexUserID(user.Id) + if err != nil { + // skip users that aren't migrated, they will be handled in the main migration loop + continue + } + if err := eventStore.UpdateUserID(ctx, originalID, user.Id); err != nil { + return fmt.Errorf("reconcile activity store for user %s: %w", user.Id, err) + } + } + return nil +} + +// migrateUser updates a single user's ID in both the main store and the activity store. +func migrateUser(ctx context.Context, s Server, oldID, accountID, newID string) error { + if err := s.Store().UpdateUserID(ctx, accountID, oldID, newID); err != nil { + return fmt.Errorf("failed to update user ID for user %s: %w", oldID, err) + } + + if s.EventStore() == nil { + return nil + } + + if err := s.EventStore().UpdateUserID(ctx, oldID, newID); err != nil { + return fmt.Errorf("failed to update activity store user ID for user %s: %w", oldID, err) + } + + return nil +} + +// PopulateUserInfo fetches user email and name from the external IDP and updates +// the store for users that are missing this information. 
+func PopulateUserInfo(s Server, idpManager idp.Manager, dryRun bool) error { + ctx := context.Background() + + users, err := s.Store().ListUsers(ctx) + if err != nil { + return fmt.Errorf("failed to list users: %w", err) + } + + // Build a map of IDP user ID -> UserData from the external IDP + allAccounts, err := idpManager.GetAllAccounts(ctx) + if err != nil { + return fmt.Errorf("failed to fetch accounts from IDP: %w", err) + } + + idpUsers := make(map[string]*idp.UserData) + for _, accountUsers := range allAccounts { + for _, userData := range accountUsers { + idpUsers[userData.ID] = userData + } + } + + log.Infof("fetched %d users from IDP", len(idpUsers)) + + var updatedCount, skippedCount, notFoundCount int + + for _, user := range users { + if user.IsServiceUser { + skippedCount++ + continue + } + + if user.Email != "" && user.Name != "" { + skippedCount++ + continue + } + + // The user ID in the store may be the original IDP ID or a Dex-encoded ID. + // Try to decode the Dex format first to get the original IDP ID. 
+ lookupID := user.Id + if originalID, _, decErr := dex.DecodeDexUserID(user.Id); decErr == nil { + lookupID = originalID + } + + idpUser, found := idpUsers[lookupID] + if !found { + notFoundCount++ + log.Debugf("user %s (lookup: %s) not found in IDP, skipping", user.Id, lookupID) + continue + } + + email := user.Email + name := user.Name + if email == "" && idpUser.Email != "" { + email = idpUser.Email + } + if name == "" && idpUser.Name != "" { + name = idpUser.Name + } + + if email == user.Email && name == user.Name { + skippedCount++ + continue + } + + if dryRun { + log.Infof("[DRY RUN] would update user %s: email=%q, name=%q", user.Id, email, name) + updatedCount++ + continue + } + + if err := s.Store().UpdateUserInfo(ctx, user.Id, email, name); err != nil { + return fmt.Errorf("failed to update user info for %s: %w", user.Id, err) + } + + log.Infof("updated user %s: email=%q, name=%q", user.Id, email, name) + updatedCount++ + } + + if dryRun { + log.Infof("[DRY RUN] user info summary: %d would be updated, %d skipped, %d not found in IDP", updatedCount, skippedCount, notFoundCount) + } else { + log.Infof("user info population complete: %d updated, %d skipped, %d not found in IDP", updatedCount, skippedCount, notFoundCount) + } + + return nil +} diff --git a/management/server/idp/migration/migration_test.go b/management/server/idp/migration/migration_test.go new file mode 100644 index 000000000..2ff71347e --- /dev/null +++ b/management/server/idp/migration/migration_test.go @@ -0,0 +1,828 @@ +package migration + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/idp/dex" + "github.com/netbirdio/netbird/management/server/idp" + "github.com/netbirdio/netbird/management/server/types" +) + +// testStore is a hand-written mock for MigrationStore. 
+type testStore struct { + listUsersFunc func(ctx context.Context) ([]*types.User, error) + updateUserIDFunc func(ctx context.Context, accountID, oldUserID, newUserID string) error + updateUserInfoFunc func(ctx context.Context, userID, email, name string) error + checkSchemaFunc func(checks []SchemaCheck) []SchemaError + updateCalls []updateUserIDCall + updateInfoCalls []updateUserInfoCall +} + +type updateUserIDCall struct { + AccountID string + OldUserID string + NewUserID string +} + +type updateUserInfoCall struct { + UserID string + Email string + Name string +} + +func (s *testStore) ListUsers(ctx context.Context) ([]*types.User, error) { + return s.listUsersFunc(ctx) +} + +func (s *testStore) UpdateUserID(ctx context.Context, accountID, oldUserID, newUserID string) error { + s.updateCalls = append(s.updateCalls, updateUserIDCall{accountID, oldUserID, newUserID}) + return s.updateUserIDFunc(ctx, accountID, oldUserID, newUserID) +} + +func (s *testStore) UpdateUserInfo(ctx context.Context, userID, email, name string) error { + s.updateInfoCalls = append(s.updateInfoCalls, updateUserInfoCall{userID, email, name}) + if s.updateUserInfoFunc != nil { + return s.updateUserInfoFunc(ctx, userID, email, name) + } + return nil +} + +func (s *testStore) CheckSchema(checks []SchemaCheck) []SchemaError { + if s.checkSchemaFunc != nil { + return s.checkSchemaFunc(checks) + } + return nil +} + +type testServer struct { + store Store + eventStore EventStore +} + +func (s *testServer) Store() Store { return s.store } +func (s *testServer) EventStore() EventStore { return s.eventStore } + +func TestSeedConnectorFromEnv(t *testing.T) { + t.Run("returns ErrNoSeedInfo when env var is not set", func(t *testing.T) { + os.Unsetenv(idpSeedInfoKey) + + conn, err := SeedConnectorFromEnv() + assert.ErrorIs(t, err, ErrNoSeedInfo) + assert.Nil(t, conn) + }) + + t.Run("returns ErrNoSeedInfo when env var is empty", func(t *testing.T) { + t.Setenv(idpSeedInfoKey, "") + + conn, err := 
SeedConnectorFromEnv() + assert.ErrorIs(t, err, ErrNoSeedInfo) + assert.Nil(t, conn) + }) + + t.Run("returns error on invalid base64", func(t *testing.T) { + t.Setenv(idpSeedInfoKey, "not-valid-base64!!!") + + conn, err := SeedConnectorFromEnv() + assert.NotErrorIs(t, err, ErrNoSeedInfo) + assert.Error(t, err) + assert.Nil(t, conn) + assert.Contains(t, err.Error(), "base64 decode") + }) + + t.Run("returns error on invalid JSON", func(t *testing.T) { + encoded := base64.StdEncoding.EncodeToString([]byte("not json")) + t.Setenv(idpSeedInfoKey, encoded) + + conn, err := SeedConnectorFromEnv() + assert.NotErrorIs(t, err, ErrNoSeedInfo) + assert.Error(t, err) + assert.Nil(t, conn) + assert.Contains(t, err.Error(), "json unmarshal") + }) + + t.Run("successfully decodes valid connector", func(t *testing.T) { + expected := dex.Connector{ + Type: "oidc", + Name: "Test Provider", + ID: "test-provider", + Config: map[string]any{ + "issuer": "https://example.com", + "clientID": "my-client-id", + "clientSecret": "my-secret", + }, + } + + data, err := json.Marshal(expected) + require.NoError(t, err) + + encoded := base64.StdEncoding.EncodeToString(data) + t.Setenv(idpSeedInfoKey, encoded) + + conn, err := SeedConnectorFromEnv() + assert.NoError(t, err) + require.NotNil(t, conn) + assert.Equal(t, expected.Type, conn.Type) + assert.Equal(t, expected.Name, conn.Name) + assert.Equal(t, expected.ID, conn.ID) + assert.Equal(t, expected.Config["issuer"], conn.Config["issuer"]) + }) +} + +func TestMigrateUsersToStaticConnectors(t *testing.T) { + connector := &dex.Connector{ + Type: "oidc", + Name: "Test Provider", + ID: "test-connector", + } + + t.Run("succeeds with no users", func(t *testing.T) { + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { return nil, nil }, + updateUserIDFunc: func(ctx context.Context, accountID, oldUserID, newUserID string) error { return nil }, + } + + srv := &testServer{store: ms} + err := 
MigrateUsersToStaticConnectors(srv, connector) + assert.NoError(t, err) + }) + + t.Run("returns error when ListUsers fails", func(t *testing.T) { + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return nil, fmt.Errorf("db error") + }, + updateUserIDFunc: func(ctx context.Context, accountID, oldUserID, newUserID string) error { return nil }, + } + + srv := &testServer{store: ms} + err := MigrateUsersToStaticConnectors(srv, connector) + assert.Error(t, err) + assert.Contains(t, err.Error(), "failed to list users") + }) + + t.Run("migrates single user with correct encoded ID", func(t *testing.T) { + user := &types.User{Id: "user-1", AccountID: "account-1"} + expectedNewID := dex.EncodeDexUserID("user-1", "test-connector") + + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return []*types.User{user}, nil + }, + updateUserIDFunc: func(ctx context.Context, accountID, oldUserID, newUserID string) error { + return nil + }, + } + + srv := &testServer{store: ms} + err := MigrateUsersToStaticConnectors(srv, connector) + assert.NoError(t, err) + require.Len(t, ms.updateCalls, 1) + assert.Equal(t, "account-1", ms.updateCalls[0].AccountID) + assert.Equal(t, "user-1", ms.updateCalls[0].OldUserID) + assert.Equal(t, expectedNewID, ms.updateCalls[0].NewUserID) + }) + + t.Run("migrates multiple users", func(t *testing.T) { + users := []*types.User{ + {Id: "user-1", AccountID: "account-1"}, + {Id: "user-2", AccountID: "account-1"}, + {Id: "user-3", AccountID: "account-2"}, + } + + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return users, nil + }, + updateUserIDFunc: func(ctx context.Context, accountID, oldUserID, newUserID string) error { + return nil + }, + } + + srv := &testServer{store: ms} + err := MigrateUsersToStaticConnectors(srv, connector) + assert.NoError(t, err) + assert.Len(t, ms.updateCalls, 3) + }) + + t.Run("returns error when UpdateUserID 
fails", func(t *testing.T) { + users := []*types.User{ + {Id: "user-1", AccountID: "account-1"}, + {Id: "user-2", AccountID: "account-1"}, + } + + callCount := 0 + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return users, nil + }, + updateUserIDFunc: func(ctx context.Context, accountID, oldUserID, newUserID string) error { + callCount++ + if callCount == 2 { + return fmt.Errorf("update failed") + } + return nil + }, + } + + srv := &testServer{store: ms} + err := MigrateUsersToStaticConnectors(srv, connector) + assert.Error(t, err) + assert.Contains(t, err.Error(), "failed to update user ID for user user-2") + }) + + t.Run("stops on first UpdateUserID error", func(t *testing.T) { + users := []*types.User{ + {Id: "user-1", AccountID: "account-1"}, + {Id: "user-2", AccountID: "account-1"}, + } + + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return users, nil + }, + updateUserIDFunc: func(ctx context.Context, accountID, oldUserID, newUserID string) error { + return fmt.Errorf("update failed") + }, + } + + srv := &testServer{store: ms} + err := MigrateUsersToStaticConnectors(srv, connector) + assert.Error(t, err) + assert.Len(t, ms.updateCalls, 1) // stopped after first error + }) + + t.Run("skips already migrated users", func(t *testing.T) { + alreadyMigratedID := dex.EncodeDexUserID("user-1", "test-connector") + users := []*types.User{ + {Id: alreadyMigratedID, AccountID: "account-1"}, + } + + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return users, nil + }, + updateUserIDFunc: func(ctx context.Context, accountID, oldUserID, newUserID string) error { + return nil + }, + } + + srv := &testServer{store: ms} + err := MigrateUsersToStaticConnectors(srv, connector) + assert.NoError(t, err) + assert.Len(t, ms.updateCalls, 0) + }) + + t.Run("migrates only non-migrated users in mixed state", func(t *testing.T) { + alreadyMigratedID := 
dex.EncodeDexUserID("user-1", "test-connector") + users := []*types.User{ + {Id: alreadyMigratedID, AccountID: "account-1"}, + {Id: "user-2", AccountID: "account-1"}, + {Id: "user-3", AccountID: "account-2"}, + } + + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return users, nil + }, + updateUserIDFunc: func(ctx context.Context, accountID, oldUserID, newUserID string) error { + return nil + }, + } + + srv := &testServer{store: ms} + err := MigrateUsersToStaticConnectors(srv, connector) + assert.NoError(t, err) + // Only user-2 and user-3 should be migrated + assert.Len(t, ms.updateCalls, 2) + assert.Equal(t, "user-2", ms.updateCalls[0].OldUserID) + assert.Equal(t, "user-3", ms.updateCalls[1].OldUserID) + }) + + t.Run("dry run does not call UpdateUserID", func(t *testing.T) { + t.Setenv(dryRunEnvKey, "true") + + users := []*types.User{ + {Id: "user-1", AccountID: "account-1"}, + {Id: "user-2", AccountID: "account-1"}, + } + + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return users, nil + }, + updateUserIDFunc: func(ctx context.Context, accountID, oldUserID, newUserID string) error { + t.Fatal("UpdateUserID should not be called in dry-run mode") + return nil + }, + } + + srv := &testServer{store: ms} + err := MigrateUsersToStaticConnectors(srv, connector) + assert.NoError(t, err) + assert.Len(t, ms.updateCalls, 0) + }) + + t.Run("dry run skips already migrated users", func(t *testing.T) { + t.Setenv(dryRunEnvKey, "true") + + alreadyMigratedID := dex.EncodeDexUserID("user-1", "test-connector") + users := []*types.User{ + {Id: alreadyMigratedID, AccountID: "account-1"}, + {Id: "user-2", AccountID: "account-1"}, + } + + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return users, nil + }, + updateUserIDFunc: func(ctx context.Context, accountID, oldUserID, newUserID string) error { + t.Fatal("UpdateUserID should not be called in dry-run mode") 
+ return nil + }, + } + + srv := &testServer{store: ms} + err := MigrateUsersToStaticConnectors(srv, connector) + assert.NoError(t, err) + }) + + t.Run("dry run disabled by default", func(t *testing.T) { + user := &types.User{Id: "user-1", AccountID: "account-1"} + + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return []*types.User{user}, nil + }, + updateUserIDFunc: func(ctx context.Context, accountID, oldUserID, newUserID string) error { + return nil + }, + } + + srv := &testServer{store: ms} + err := MigrateUsersToStaticConnectors(srv, connector) + assert.NoError(t, err) + assert.Len(t, ms.updateCalls, 1) // proves it's not in dry-run + }) +} + +func TestPopulateUserInfo(t *testing.T) { + noopUpdateID := func(ctx context.Context, accountID, oldUserID, newUserID string) error { return nil } + + t.Run("succeeds with no users", func(t *testing.T) { + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { return nil, nil }, + updateUserIDFunc: noopUpdateID, + } + mockIDP := &idp.MockIDP{ + GetAllAccountsFunc: func(ctx context.Context) (map[string][]*idp.UserData, error) { + return map[string][]*idp.UserData{}, nil + }, + } + + srv := &testServer{store: ms} + err := PopulateUserInfo(srv, mockIDP, false) + assert.NoError(t, err) + assert.Empty(t, ms.updateInfoCalls) + }) + + t.Run("returns error when ListUsers fails", func(t *testing.T) { + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return nil, fmt.Errorf("db error") + }, + updateUserIDFunc: noopUpdateID, + } + mockIDP := &idp.MockIDP{} + + srv := &testServer{store: ms} + err := PopulateUserInfo(srv, mockIDP, false) + assert.Error(t, err) + assert.Contains(t, err.Error(), "failed to list users") + }) + + t.Run("returns error when GetAllAccounts fails", func(t *testing.T) { + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return []*types.User{{Id: "user-1", 
AccountID: "acc-1"}}, nil + }, + updateUserIDFunc: noopUpdateID, + } + mockIDP := &idp.MockIDP{ + GetAllAccountsFunc: func(ctx context.Context) (map[string][]*idp.UserData, error) { + return nil, fmt.Errorf("idp error") + }, + } + + srv := &testServer{store: ms} + err := PopulateUserInfo(srv, mockIDP, false) + assert.Error(t, err) + assert.Contains(t, err.Error(), "failed to fetch accounts from IDP") + }) + + t.Run("updates user with missing email and name", func(t *testing.T) { + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return []*types.User{ + {Id: "user-1", AccountID: "acc-1", Email: "", Name: ""}, + }, nil + }, + updateUserIDFunc: noopUpdateID, + } + mockIDP := &idp.MockIDP{ + GetAllAccountsFunc: func(ctx context.Context) (map[string][]*idp.UserData, error) { + return map[string][]*idp.UserData{ + "acc-1": { + {ID: "user-1", Email: "user1@example.com", Name: "User One"}, + }, + }, nil + }, + } + + srv := &testServer{store: ms} + err := PopulateUserInfo(srv, mockIDP, false) + assert.NoError(t, err) + require.Len(t, ms.updateInfoCalls, 1) + assert.Equal(t, "user-1", ms.updateInfoCalls[0].UserID) + assert.Equal(t, "user1@example.com", ms.updateInfoCalls[0].Email) + assert.Equal(t, "User One", ms.updateInfoCalls[0].Name) + }) + + t.Run("updates only missing email when name exists", func(t *testing.T) { + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return []*types.User{ + {Id: "user-1", AccountID: "acc-1", Email: "", Name: "Existing Name"}, + }, nil + }, + updateUserIDFunc: noopUpdateID, + } + mockIDP := &idp.MockIDP{ + GetAllAccountsFunc: func(ctx context.Context) (map[string][]*idp.UserData, error) { + return map[string][]*idp.UserData{ + "acc-1": {{ID: "user-1", Email: "user1@example.com", Name: "IDP Name"}}, + }, nil + }, + } + + srv := &testServer{store: ms} + err := PopulateUserInfo(srv, mockIDP, false) + assert.NoError(t, err) + require.Len(t, ms.updateInfoCalls, 1) + 
assert.Equal(t, "user1@example.com", ms.updateInfoCalls[0].Email) + assert.Equal(t, "Existing Name", ms.updateInfoCalls[0].Name) + }) + + t.Run("updates only missing name when email exists", func(t *testing.T) { + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return []*types.User{ + {Id: "user-1", AccountID: "acc-1", Email: "existing@example.com", Name: ""}, + }, nil + }, + updateUserIDFunc: noopUpdateID, + } + mockIDP := &idp.MockIDP{ + GetAllAccountsFunc: func(ctx context.Context) (map[string][]*idp.UserData, error) { + return map[string][]*idp.UserData{ + "acc-1": {{ID: "user-1", Email: "idp@example.com", Name: "IDP Name"}}, + }, nil + }, + } + + srv := &testServer{store: ms} + err := PopulateUserInfo(srv, mockIDP, false) + assert.NoError(t, err) + require.Len(t, ms.updateInfoCalls, 1) + assert.Equal(t, "existing@example.com", ms.updateInfoCalls[0].Email) + assert.Equal(t, "IDP Name", ms.updateInfoCalls[0].Name) + }) + + t.Run("skips users that already have both email and name", func(t *testing.T) { + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return []*types.User{ + {Id: "user-1", AccountID: "acc-1", Email: "user1@example.com", Name: "User One"}, + }, nil + }, + updateUserIDFunc: noopUpdateID, + } + mockIDP := &idp.MockIDP{ + GetAllAccountsFunc: func(ctx context.Context) (map[string][]*idp.UserData, error) { + return map[string][]*idp.UserData{ + "acc-1": {{ID: "user-1", Email: "different@example.com", Name: "Different Name"}}, + }, nil + }, + } + + srv := &testServer{store: ms} + err := PopulateUserInfo(srv, mockIDP, false) + assert.NoError(t, err) + assert.Empty(t, ms.updateInfoCalls) + }) + + t.Run("skips service users", func(t *testing.T) { + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return []*types.User{ + {Id: "svc-1", AccountID: "acc-1", Email: "", Name: "", IsServiceUser: true}, + }, nil + }, + updateUserIDFunc: 
noopUpdateID, + } + mockIDP := &idp.MockIDP{ + GetAllAccountsFunc: func(ctx context.Context) (map[string][]*idp.UserData, error) { + return map[string][]*idp.UserData{ + "acc-1": {{ID: "svc-1", Email: "svc@example.com", Name: "Service"}}, + }, nil + }, + } + + srv := &testServer{store: ms} + err := PopulateUserInfo(srv, mockIDP, false) + assert.NoError(t, err) + assert.Empty(t, ms.updateInfoCalls) + }) + + t.Run("skips users not found in IDP", func(t *testing.T) { + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return []*types.User{ + {Id: "user-1", AccountID: "acc-1", Email: "", Name: ""}, + }, nil + }, + updateUserIDFunc: noopUpdateID, + } + mockIDP := &idp.MockIDP{ + GetAllAccountsFunc: func(ctx context.Context) (map[string][]*idp.UserData, error) { + return map[string][]*idp.UserData{ + "acc-1": {{ID: "different-user", Email: "other@example.com", Name: "Other"}}, + }, nil + }, + } + + srv := &testServer{store: ms} + err := PopulateUserInfo(srv, mockIDP, false) + assert.NoError(t, err) + assert.Empty(t, ms.updateInfoCalls) + }) + + t.Run("looks up dex-encoded user IDs by original ID", func(t *testing.T) { + dexEncodedID := dex.EncodeDexUserID("original-idp-id", "my-connector") + + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return []*types.User{ + {Id: dexEncodedID, AccountID: "acc-1", Email: "", Name: ""}, + }, nil + }, + updateUserIDFunc: noopUpdateID, + } + mockIDP := &idp.MockIDP{ + GetAllAccountsFunc: func(ctx context.Context) (map[string][]*idp.UserData, error) { + return map[string][]*idp.UserData{ + "acc-1": {{ID: "original-idp-id", Email: "user@example.com", Name: "User"}}, + }, nil + }, + } + + srv := &testServer{store: ms} + err := PopulateUserInfo(srv, mockIDP, false) + assert.NoError(t, err) + require.Len(t, ms.updateInfoCalls, 1) + assert.Equal(t, dexEncodedID, ms.updateInfoCalls[0].UserID) + assert.Equal(t, "user@example.com", ms.updateInfoCalls[0].Email) + 
assert.Equal(t, "User", ms.updateInfoCalls[0].Name) + }) + + t.Run("handles multiple users across multiple accounts", func(t *testing.T) { + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return []*types.User{ + {Id: "user-1", AccountID: "acc-1", Email: "", Name: ""}, + {Id: "user-2", AccountID: "acc-1", Email: "already@set.com", Name: "Already Set"}, + {Id: "user-3", AccountID: "acc-2", Email: "", Name: ""}, + }, nil + }, + updateUserIDFunc: noopUpdateID, + } + mockIDP := &idp.MockIDP{ + GetAllAccountsFunc: func(ctx context.Context) (map[string][]*idp.UserData, error) { + return map[string][]*idp.UserData{ + "acc-1": { + {ID: "user-1", Email: "u1@example.com", Name: "User 1"}, + {ID: "user-2", Email: "u2@example.com", Name: "User 2"}, + }, + "acc-2": { + {ID: "user-3", Email: "u3@example.com", Name: "User 3"}, + }, + }, nil + }, + } + + srv := &testServer{store: ms} + err := PopulateUserInfo(srv, mockIDP, false) + assert.NoError(t, err) + require.Len(t, ms.updateInfoCalls, 2) + assert.Equal(t, "user-1", ms.updateInfoCalls[0].UserID) + assert.Equal(t, "u1@example.com", ms.updateInfoCalls[0].Email) + assert.Equal(t, "user-3", ms.updateInfoCalls[1].UserID) + assert.Equal(t, "u3@example.com", ms.updateInfoCalls[1].Email) + }) + + t.Run("returns error when UpdateUserInfo fails", func(t *testing.T) { + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return []*types.User{ + {Id: "user-1", AccountID: "acc-1", Email: "", Name: ""}, + }, nil + }, + updateUserIDFunc: noopUpdateID, + updateUserInfoFunc: func(ctx context.Context, userID, email, name string) error { + return fmt.Errorf("db write error") + }, + } + mockIDP := &idp.MockIDP{ + GetAllAccountsFunc: func(ctx context.Context) (map[string][]*idp.UserData, error) { + return map[string][]*idp.UserData{ + "acc-1": {{ID: "user-1", Email: "u1@example.com", Name: "User 1"}}, + }, nil + }, + } + + srv := &testServer{store: ms} + err := 
PopulateUserInfo(srv, mockIDP, false) + assert.Error(t, err) + assert.Contains(t, err.Error(), "failed to update user info for user-1") + }) + + t.Run("stops on first UpdateUserInfo error", func(t *testing.T) { + callCount := 0 + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return []*types.User{ + {Id: "user-1", AccountID: "acc-1", Email: "", Name: ""}, + {Id: "user-2", AccountID: "acc-1", Email: "", Name: ""}, + }, nil + }, + updateUserIDFunc: noopUpdateID, + updateUserInfoFunc: func(ctx context.Context, userID, email, name string) error { + callCount++ + return fmt.Errorf("db write error") + }, + } + mockIDP := &idp.MockIDP{ + GetAllAccountsFunc: func(ctx context.Context) (map[string][]*idp.UserData, error) { + return map[string][]*idp.UserData{ + "acc-1": { + {ID: "user-1", Email: "u1@example.com", Name: "U1"}, + {ID: "user-2", Email: "u2@example.com", Name: "U2"}, + }, + }, nil + }, + } + + srv := &testServer{store: ms} + err := PopulateUserInfo(srv, mockIDP, false) + assert.Error(t, err) + assert.Equal(t, 1, callCount) + }) + + t.Run("dry run does not call UpdateUserInfo", func(t *testing.T) { + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return []*types.User{ + {Id: "user-1", AccountID: "acc-1", Email: "", Name: ""}, + {Id: "user-2", AccountID: "acc-1", Email: "", Name: ""}, + }, nil + }, + updateUserIDFunc: noopUpdateID, + updateUserInfoFunc: func(ctx context.Context, userID, email, name string) error { + t.Fatal("UpdateUserInfo should not be called in dry-run mode") + return nil + }, + } + mockIDP := &idp.MockIDP{ + GetAllAccountsFunc: func(ctx context.Context) (map[string][]*idp.UserData, error) { + return map[string][]*idp.UserData{ + "acc-1": { + {ID: "user-1", Email: "u1@example.com", Name: "U1"}, + {ID: "user-2", Email: "u2@example.com", Name: "U2"}, + }, + }, nil + }, + } + + srv := &testServer{store: ms} + err := PopulateUserInfo(srv, mockIDP, true) + 
assert.NoError(t, err) + assert.Empty(t, ms.updateInfoCalls) + }) + + t.Run("skips user when IDP has empty email and name too", func(t *testing.T) { + ms := &testStore{ + listUsersFunc: func(ctx context.Context) ([]*types.User, error) { + return []*types.User{ + {Id: "user-1", AccountID: "acc-1", Email: "", Name: ""}, + }, nil + }, + updateUserIDFunc: noopUpdateID, + } + mockIDP := &idp.MockIDP{ + GetAllAccountsFunc: func(ctx context.Context) (map[string][]*idp.UserData, error) { + return map[string][]*idp.UserData{ + "acc-1": {{ID: "user-1", Email: "", Name: ""}}, + }, nil + }, + } + + srv := &testServer{store: ms} + err := PopulateUserInfo(srv, mockIDP, false) + assert.NoError(t, err) + assert.Empty(t, ms.updateInfoCalls) + }) +} + +func TestSchemaError_String(t *testing.T) { + t.Run("missing table", func(t *testing.T) { + e := SchemaError{Table: "jobs"} + assert.Equal(t, `table "jobs" is missing`, e.String()) + }) + + t.Run("missing column", func(t *testing.T) { + e := SchemaError{Table: "users", Column: "email"} + assert.Equal(t, `column "email" on table "users" is missing`, e.String()) + }) +} + +func TestRequiredSchema(t *testing.T) { + // Verify RequiredSchema covers all the tables touched by UpdateUserID and UpdateUserInfo. 
+ expectedTables := []string{ + "users", + "personal_access_tokens", + "peers", + "accounts", + "user_invites", + "proxy_access_tokens", + "jobs", + } + + schemaTableNames := make([]string, len(RequiredSchema)) + for i, s := range RequiredSchema { + schemaTableNames[i] = s.Table + } + + for _, expected := range expectedTables { + assert.Contains(t, schemaTableNames, expected, "RequiredSchema should include table %q", expected) + } +} + +func TestCheckSchema_MockStore(t *testing.T) { + t.Run("returns nil when all schema exists", func(t *testing.T) { + ms := &testStore{ + checkSchemaFunc: func(checks []SchemaCheck) []SchemaError { + return nil + }, + } + errs := ms.CheckSchema(RequiredSchema) + assert.Empty(t, errs) + }) + + t.Run("returns errors for missing tables", func(t *testing.T) { + ms := &testStore{ + checkSchemaFunc: func(checks []SchemaCheck) []SchemaError { + return []SchemaError{ + {Table: "jobs"}, + {Table: "proxy_access_tokens"}, + } + }, + } + errs := ms.CheckSchema(RequiredSchema) + require.Len(t, errs, 2) + assert.Equal(t, "jobs", errs[0].Table) + assert.Equal(t, "", errs[0].Column) + assert.Equal(t, "proxy_access_tokens", errs[1].Table) + }) + + t.Run("returns errors for missing columns", func(t *testing.T) { + ms := &testStore{ + checkSchemaFunc: func(checks []SchemaCheck) []SchemaError { + return []SchemaError{ + {Table: "users", Column: "email"}, + {Table: "users", Column: "name"}, + } + }, + } + errs := ms.CheckSchema(RequiredSchema) + require.Len(t, errs, 2) + assert.Equal(t, "users", errs[0].Table) + assert.Equal(t, "email", errs[0].Column) + }) +} diff --git a/management/server/idp/migration/store.go b/management/server/idp/migration/store.go new file mode 100644 index 000000000..e7cc54a41 --- /dev/null +++ b/management/server/idp/migration/store.go @@ -0,0 +1,82 @@ +package migration + +import ( + "context" + "fmt" + + "github.com/netbirdio/netbird/management/server/types" +) + +// SchemaCheck represents a table and the columns required on 
it. +type SchemaCheck struct { + Table string + Columns []string +} + +// RequiredSchema lists all tables and columns that the migration tool needs. +// If any are missing, the user must upgrade their management server first so +// that the automatic GORM migrations create them. +var RequiredSchema = []SchemaCheck{ + {Table: "users", Columns: []string{"id", "email", "name", "account_id"}}, + {Table: "personal_access_tokens", Columns: []string{"user_id", "created_by"}}, + {Table: "peers", Columns: []string{"user_id"}}, + {Table: "accounts", Columns: []string{"created_by"}}, + {Table: "user_invites", Columns: []string{"created_by"}}, + {Table: "proxy_access_tokens", Columns: []string{"created_by"}}, + {Table: "jobs", Columns: []string{"triggered_by"}}, +} + +// SchemaError describes a single missing table or column. +type SchemaError struct { + Table string + Column string // empty when the whole table is missing +} + +func (e SchemaError) String() string { + if e.Column == "" { + return fmt.Sprintf("table %q is missing", e.Table) + } + return fmt.Sprintf("column %q on table %q is missing", e.Column, e.Table) +} + +// Store defines the data store operations required for IdP user migration. +// This interface is separate from the main store.Store interface because these methods +// are only used during one-time migration and should be removed once migration tooling +// is no longer needed. +// +// The SQL store implementations (SqlStore) already have these methods on their concrete +// types, so they satisfy this interface via Go's structural typing with zero code changes. +type Store interface { + // ListUsers returns all users across all accounts. + ListUsers(ctx context.Context) ([]*types.User, error) + + // UpdateUserID atomically updates a user's ID and all foreign key references + // across the database (peers, groups, policies, PATs, etc.). 
+ UpdateUserID(ctx context.Context, accountID, oldUserID, newUserID string) error + + // UpdateUserInfo updates a user's email and name in the store. + UpdateUserInfo(ctx context.Context, userID, email, name string) error + + // CheckSchema verifies that all tables and columns required by the migration + // exist in the database. Returns a list of problems; an empty slice means OK. + CheckSchema(checks []SchemaCheck) []SchemaError +} + +// RequiredEventSchema lists all tables and columns that the migration tool needs +// in the activity/event store. +var RequiredEventSchema = []SchemaCheck{ + {Table: "events", Columns: []string{"initiator_id", "target_id"}}, + {Table: "deleted_users", Columns: []string{"id"}}, +} + +// EventStore defines the activity event store operations required for migration. +// Like Store, this is a temporary interface for migration tooling only. +type EventStore interface { + // CheckSchema verifies that all tables and columns required by the migration + // exist in the event database. Returns a list of problems; an empty slice means OK. + CheckSchema(checks []SchemaCheck) []SchemaError + + // UpdateUserID updates all event references (initiator_id, target_id) and + // deleted_users records to use the new user ID format. 
+ UpdateUserID(ctx context.Context, oldUserID, newUserID string) error +} diff --git a/management/server/instance/manager.go b/management/server/instance/manager.go index 19e3abdc0..9579d7a35 100644 --- a/management/server/instance/manager.go +++ b/management/server/instance/manager.go @@ -64,10 +64,19 @@ type Manager interface { GetVersionInfo(ctx context.Context) (*VersionInfo, error) } +type instanceStore interface { + GetAccountsCounter(ctx context.Context) (int64, error) +} + +type embeddedIdP interface { + CreateUserWithPassword(ctx context.Context, email, password, name string) (*idp.UserData, error) + GetAllAccounts(ctx context.Context) (map[string][]*idp.UserData, error) +} + // DefaultManager is the default implementation of Manager. type DefaultManager struct { - store store.Store - embeddedIdpManager *idp.EmbeddedIdPManager + store instanceStore + embeddedIdpManager embeddedIdP setupRequired bool setupMu sync.RWMutex @@ -82,18 +91,18 @@ type DefaultManager struct { // NewManager creates a new instance manager. // If idpManager is not an EmbeddedIdPManager, setup-related operations will return appropriate defaults. func NewManager(ctx context.Context, store store.Store, idpManager idp.Manager) (Manager, error) { - embeddedIdp, _ := idpManager.(*idp.EmbeddedIdPManager) + embeddedIdp, ok := idpManager.(*idp.EmbeddedIdPManager) m := &DefaultManager{ - store: store, - embeddedIdpManager: embeddedIdp, - setupRequired: false, + store: store, + setupRequired: false, httpClient: &http.Client{ Timeout: httpTimeout, }, } - if embeddedIdp != nil { + if ok && embeddedIdp != nil { + m.embeddedIdpManager = embeddedIdp err := m.loadSetupRequired(ctx) if err != nil { return nil, err @@ -143,36 +152,61 @@ func (m *DefaultManager) IsSetupRequired(_ context.Context) (bool, error) { // CreateOwnerUser creates the initial owner user in the embedded IDP. 
func (m *DefaultManager) CreateOwnerUser(ctx context.Context, email, password, name string) (*idp.UserData, error) { - if err := m.validateSetupInfo(email, password, name); err != nil { - return nil, err - } - if m.embeddedIdpManager == nil { return nil, errors.New("embedded IDP is not enabled") } - m.setupMu.RLock() - setupRequired := m.setupRequired - m.setupMu.RUnlock() + if err := m.validateSetupInfo(email, password, name); err != nil { + return nil, err + } - if !setupRequired { + m.setupMu.Lock() + defer m.setupMu.Unlock() + + if !m.setupRequired { return nil, status.Errorf(status.PreconditionFailed, "setup already completed") } + if err := m.checkSetupRequiredFromDB(ctx); err != nil { + var sErr *status.Error + if errors.As(err, &sErr) && sErr.Type() == status.PreconditionFailed { + m.setupRequired = false + } + return nil, err + } + userData, err := m.embeddedIdpManager.CreateUserWithPassword(ctx, email, password, name) if err != nil { return nil, fmt.Errorf("failed to create user in embedded IdP: %w", err) } - m.setupMu.Lock() m.setupRequired = false - m.setupMu.Unlock() log.WithContext(ctx).Infof("created owner user %s in embedded IdP", email) return userData, nil } +func (m *DefaultManager) checkSetupRequiredFromDB(ctx context.Context) error { + numAccounts, err := m.store.GetAccountsCounter(ctx) + if err != nil { + return fmt.Errorf("failed to check accounts: %w", err) + } + if numAccounts > 0 { + return status.Errorf(status.PreconditionFailed, "setup already completed") + } + + users, err := m.embeddedIdpManager.GetAllAccounts(ctx) + if err != nil { + return fmt.Errorf("failed to check IdP users: %w", err) + } + if len(users) > 0 { + return status.Errorf(status.PreconditionFailed, "setup already completed") + } + + return nil +} + func (m *DefaultManager) validateSetupInfo(email, password, name string) error { if email == "" { return status.Errorf(status.InvalidArgument, "email is required") @@ -189,6 +223,9 @@ func (m *DefaultManager) 
validateSetupInfo(email, password, name string) error { if len(password) < 8 { return status.Errorf(status.InvalidArgument, "password must be at least 8 characters") } + if len(password) > 72 { + return status.Errorf(status.InvalidArgument, "password must be at most 72 characters") + } return nil } diff --git a/management/server/instance/manager_test.go b/management/server/instance/manager_test.go index 35d0ff53c..e3be9cfea 100644 --- a/management/server/instance/manager_test.go +++ b/management/server/instance/manager_test.go @@ -3,7 +3,12 @@ package instance import ( "context" "errors" + "fmt" + "net/http" + "sync" + "sync/atomic" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -11,173 +16,215 @@ import ( "github.com/netbirdio/netbird/management/server/idp" ) -// mockStore implements a minimal store.Store for testing +type mockIdP struct { + mu sync.Mutex + createUserFunc func(ctx context.Context, email, password, name string) (*idp.UserData, error) + users map[string][]*idp.UserData + getAllAccountsErr error +} + +func (m *mockIdP) CreateUserWithPassword(ctx context.Context, email, password, name string) (*idp.UserData, error) { + if m.createUserFunc != nil { + return m.createUserFunc(ctx, email, password, name) + } + return &idp.UserData{ID: "test-user-id", Email: email, Name: name}, nil +} + +func (m *mockIdP) GetAllAccounts(_ context.Context) (map[string][]*idp.UserData, error) { + m.mu.Lock() + defer m.mu.Unlock() + if m.getAllAccountsErr != nil { + return nil, m.getAllAccountsErr + } + return m.users, nil +} + type mockStore struct { accountsCount int64 err error } -func (m *mockStore) GetAccountsCounter(ctx context.Context) (int64, error) { +func (m *mockStore) GetAccountsCounter(_ context.Context) (int64, error) { if m.err != nil { return 0, m.err } return m.accountsCount, nil } -// mockEmbeddedIdPManager wraps the real EmbeddedIdPManager for testing -type mockEmbeddedIdPManager struct { - createUserFunc 
func(ctx context.Context, email, password, name string) (*idp.UserData, error) -} - -func (m *mockEmbeddedIdPManager) CreateUserWithPassword(ctx context.Context, email, password, name string) (*idp.UserData, error) { - if m.createUserFunc != nil { - return m.createUserFunc(ctx, email, password, name) +func newTestManager(idpMock *mockIdP, storeMock *mockStore) *DefaultManager { + return &DefaultManager{ + store: storeMock, + embeddedIdpManager: idpMock, + setupRequired: true, + httpClient: &http.Client{Timeout: httpTimeout}, } - return &idp.UserData{ - ID: "test-user-id", - Email: email, - Name: name, - }, nil -} - -// testManager is a test implementation that accepts our mock types -type testManager struct { - store *mockStore - embeddedIdpManager *mockEmbeddedIdPManager -} - -func (m *testManager) IsSetupRequired(ctx context.Context) (bool, error) { - if m.embeddedIdpManager == nil { - return false, nil - } - - count, err := m.store.GetAccountsCounter(ctx) - if err != nil { - return false, err - } - - return count == 0, nil -} - -func (m *testManager) CreateOwnerUser(ctx context.Context, email, password, name string) (*idp.UserData, error) { - if m.embeddedIdpManager == nil { - return nil, errors.New("embedded IDP is not enabled") - } - - return m.embeddedIdpManager.CreateUserWithPassword(ctx, email, password, name) -} - -func TestIsSetupRequired_EmbeddedIdPDisabled(t *testing.T) { - manager := &testManager{ - store: &mockStore{accountsCount: 0}, - embeddedIdpManager: nil, // No embedded IDP - } - - required, err := manager.IsSetupRequired(context.Background()) - require.NoError(t, err) - assert.False(t, required, "setup should not be required when embedded IDP is disabled") -} - -func TestIsSetupRequired_NoAccounts(t *testing.T) { - manager := &testManager{ - store: &mockStore{accountsCount: 0}, - embeddedIdpManager: &mockEmbeddedIdPManager{}, - } - - required, err := manager.IsSetupRequired(context.Background()) - require.NoError(t, err) - assert.True(t, 
required, "setup should be required when no accounts exist") -} - -func TestIsSetupRequired_AccountsExist(t *testing.T) { - manager := &testManager{ - store: &mockStore{accountsCount: 1}, - embeddedIdpManager: &mockEmbeddedIdPManager{}, - } - - required, err := manager.IsSetupRequired(context.Background()) - require.NoError(t, err) - assert.False(t, required, "setup should not be required when accounts exist") -} - -func TestIsSetupRequired_MultipleAccounts(t *testing.T) { - manager := &testManager{ - store: &mockStore{accountsCount: 5}, - embeddedIdpManager: &mockEmbeddedIdPManager{}, - } - - required, err := manager.IsSetupRequired(context.Background()) - require.NoError(t, err) - assert.False(t, required, "setup should not be required when multiple accounts exist") -} - -func TestIsSetupRequired_StoreError(t *testing.T) { - manager := &testManager{ - store: &mockStore{err: errors.New("database error")}, - embeddedIdpManager: &mockEmbeddedIdPManager{}, - } - - _, err := manager.IsSetupRequired(context.Background()) - assert.Error(t, err, "should return error when store fails") } func TestCreateOwnerUser_Success(t *testing.T) { - expectedEmail := "admin@example.com" - expectedName := "Admin User" - expectedPassword := "securepassword123" + idpMock := &mockIdP{} + mgr := newTestManager(idpMock, &mockStore{}) - manager := &testManager{ - store: &mockStore{accountsCount: 0}, - embeddedIdpManager: &mockEmbeddedIdPManager{ - createUserFunc: func(ctx context.Context, email, password, name string) (*idp.UserData, error) { - assert.Equal(t, expectedEmail, email) - assert.Equal(t, expectedPassword, password) - assert.Equal(t, expectedName, name) - return &idp.UserData{ - ID: "created-user-id", - Email: email, - Name: name, - }, nil - }, - }, - } - - userData, err := manager.CreateOwnerUser(context.Background(), expectedEmail, expectedPassword, expectedName) + userData, err := mgr.CreateOwnerUser(context.Background(), "admin@example.com", "password123", "Admin") 
require.NoError(t, err) - assert.Equal(t, "created-user-id", userData.ID) - assert.Equal(t, expectedEmail, userData.Email) - assert.Equal(t, expectedName, userData.Name) + assert.Equal(t, "admin@example.com", userData.Email) + + _, err = mgr.CreateOwnerUser(context.Background(), "admin2@example.com", "password123", "Admin2") + require.Error(t, err) + assert.Contains(t, err.Error(), "setup already completed") +} + +func TestCreateOwnerUser_SetupAlreadyCompleted(t *testing.T) { + mgr := newTestManager(&mockIdP{}, &mockStore{}) + mgr.setupRequired = false + + _, err := mgr.CreateOwnerUser(context.Background(), "admin@example.com", "password123", "Admin") + require.Error(t, err) + assert.Contains(t, err.Error(), "setup already completed") } func TestCreateOwnerUser_EmbeddedIdPDisabled(t *testing.T) { - manager := &testManager{ - store: &mockStore{accountsCount: 0}, - embeddedIdpManager: nil, - } + mgr := &DefaultManager{setupRequired: true} - _, err := manager.CreateOwnerUser(context.Background(), "admin@example.com", "password123", "Admin") - assert.Error(t, err, "should return error when embedded IDP is disabled") + _, err := mgr.CreateOwnerUser(context.Background(), "admin@example.com", "password123", "Admin") + require.Error(t, err) assert.Contains(t, err.Error(), "embedded IDP is not enabled") } func TestCreateOwnerUser_IdPError(t *testing.T) { - manager := &testManager{ - store: &mockStore{accountsCount: 0}, - embeddedIdpManager: &mockEmbeddedIdPManager{ - createUserFunc: func(ctx context.Context, email, password, name string) (*idp.UserData, error) { - return nil, errors.New("user already exists") - }, + idpMock := &mockIdP{ + createUserFunc: func(_ context.Context, _, _, _ string) (*idp.UserData, error) { + return nil, errors.New("provider error") }, } + mgr := newTestManager(idpMock, &mockStore{}) - _, err := manager.CreateOwnerUser(context.Background(), "admin@example.com", "password123", "Admin") - assert.Error(t, err, "should return error when IDP fails") + 
_, err := mgr.CreateOwnerUser(context.Background(), "admin@example.com", "password123", "Admin") + require.Error(t, err) + assert.Contains(t, err.Error(), "provider error") + + required, _ := mgr.IsSetupRequired(context.Background()) + assert.True(t, required, "setup should still be required after IdP error") +} + +func TestCreateOwnerUser_TransientDBError_DoesNotBlockSetup(t *testing.T) { + mgr := newTestManager(&mockIdP{}, &mockStore{err: errors.New("connection refused")}) + + _, err := mgr.CreateOwnerUser(context.Background(), "admin@example.com", "password123", "Admin") + require.Error(t, err) + assert.Contains(t, err.Error(), "connection refused") + + required, _ := mgr.IsSetupRequired(context.Background()) + assert.True(t, required, "setup should still be required after transient DB error") + + mgr.store = &mockStore{} + userData, err := mgr.CreateOwnerUser(context.Background(), "admin@example.com", "password123", "Admin") + require.NoError(t, err) + assert.Equal(t, "admin@example.com", userData.Email) +} + +func TestCreateOwnerUser_TransientIdPError_DoesNotBlockSetup(t *testing.T) { + idpMock := &mockIdP{getAllAccountsErr: errors.New("connection reset")} + mgr := newTestManager(idpMock, &mockStore{}) + + _, err := mgr.CreateOwnerUser(context.Background(), "admin@example.com", "password123", "Admin") + require.Error(t, err) + assert.Contains(t, err.Error(), "connection reset") + + required, _ := mgr.IsSetupRequired(context.Background()) + assert.True(t, required, "setup should still be required after transient IdP error") + + idpMock.getAllAccountsErr = nil + userData, err := mgr.CreateOwnerUser(context.Background(), "admin@example.com", "password123", "Admin") + require.NoError(t, err) + assert.Equal(t, "admin@example.com", userData.Email) +} + +func TestCreateOwnerUser_DBCheckBlocksConcurrent(t *testing.T) { + idpMock := &mockIdP{ + users: map[string][]*idp.UserData{ + "acc1": {{ID: "existing-user"}}, + }, + } + mgr := newTestManager(idpMock, &mockStore{}) 
+ + _, err := mgr.CreateOwnerUser(context.Background(), "admin@example.com", "password123", "Admin") + require.Error(t, err) + assert.Contains(t, err.Error(), "setup already completed") +} + +func TestCreateOwnerUser_DBCheckBlocksWhenAccountsExist(t *testing.T) { + mgr := newTestManager(&mockIdP{}, &mockStore{accountsCount: 1}) + + _, err := mgr.CreateOwnerUser(context.Background(), "admin@example.com", "password123", "Admin") + require.Error(t, err) + assert.Contains(t, err.Error(), "setup already completed") +} + +func TestCreateOwnerUser_ConcurrentRequests(t *testing.T) { + var idpCallCount atomic.Int32 + var successCount atomic.Int32 + var failCount atomic.Int32 + + idpMock := &mockIdP{ + createUserFunc: func(_ context.Context, email, _, _ string) (*idp.UserData, error) { + idpCallCount.Add(1) + time.Sleep(50 * time.Millisecond) + return &idp.UserData{ID: "user-1", Email: email, Name: "Owner"}, nil + }, + } + mgr := newTestManager(idpMock, &mockStore{}) + + var wg sync.WaitGroup + for i := range 10 { + wg.Add(1) + go func(idx int) { + defer wg.Done() + _, err := mgr.CreateOwnerUser( + context.Background(), + fmt.Sprintf("owner%d@example.com", idx), + "password1234", + fmt.Sprintf("Owner%d", idx), + ) + if err != nil { + failCount.Add(1) + } else { + successCount.Add(1) + } + }(i) + } + wg.Wait() + + assert.Equal(t, int32(1), successCount.Load(), "exactly one concurrent setup request should succeed") + assert.Equal(t, int32(9), failCount.Load(), "remaining concurrent requests should fail") + assert.Equal(t, int32(1), idpCallCount.Load(), "IdP CreateUser should be called exactly once") +} + +func TestIsSetupRequired_EmbeddedIdPDisabled(t *testing.T) { + mgr := &DefaultManager{} + + required, err := mgr.IsSetupRequired(context.Background()) + require.NoError(t, err) + assert.False(t, required) +} + +func TestIsSetupRequired_ReturnsFlag(t *testing.T) { + mgr := newTestManager(&mockIdP{}, &mockStore{}) + + required, err := mgr.IsSetupRequired(context.Background()) + 
require.NoError(t, err) + assert.True(t, required) + + mgr.setupMu.Lock() + mgr.setupRequired = false + mgr.setupMu.Unlock() + + required, err = mgr.IsSetupRequired(context.Background()) + require.NoError(t, err) + assert.False(t, required) } func TestDefaultManager_ValidateSetupRequest(t *testing.T) { - manager := &DefaultManager{ - setupRequired: true, - } + manager := &DefaultManager{setupRequired: true} tests := []struct { name string @@ -188,11 +235,10 @@ func TestDefaultManager_ValidateSetupRequest(t *testing.T) { errorMsg string }{ { - name: "valid request", - email: "admin@example.com", - password: "password123", - userName: "Admin User", - expectError: false, + name: "valid request", + email: "admin@example.com", + password: "password123", + userName: "Admin User", }, { name: "empty email", @@ -235,11 +281,24 @@ func TestDefaultManager_ValidateSetupRequest(t *testing.T) { errorMsg: "password must be at least 8 characters", }, { - name: "password exactly 8 characters", + name: "password exactly 8 characters", + email: "admin@example.com", + password: "12345678", + userName: "Admin User", + }, + { + name: "password exactly 72 characters", + email: "admin@example.com", + password: "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhhiiiiiiii", + userName: "Admin User", + }, + { + name: "password too long", email: "admin@example.com", - password: "12345678", + password: "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhhiiiiiiiij", userName: "Admin User", - expectError: false, + expectError: true, + errorMsg: "password must be at most 72 characters", }, } @@ -255,14 +314,3 @@ func TestDefaultManager_ValidateSetupRequest(t *testing.T) { }) } } - -func TestDefaultManager_CreateOwnerUser_SetupAlreadyCompleted(t *testing.T) { - manager := &DefaultManager{ - setupRequired: false, - embeddedIdpManager: &idp.EmbeddedIdPManager{}, - } - - _, err := manager.CreateOwnerUser(context.Background(), "admin@example.com", "password123", "Admin") - 
require.Error(t, err) - assert.Contains(t, err.Error(), "setup already completed") -} diff --git a/management/server/migration/migration.go b/management/server/migration/migration.go index 29555ed0c..7a51cc200 100644 --- a/management/server/migration/migration.go +++ b/management/server/migration/migration.go @@ -489,6 +489,102 @@ func MigrateJsonToTable[T any](ctx context.Context, db *gorm.DB, columnName stri return nil } +// hasForeignKey checks whether a foreign key constraint exists on the given table and column. +func hasForeignKey(db *gorm.DB, table, column string) bool { + var count int64 + + switch db.Name() { + case "postgres": + db.Raw(` + SELECT COUNT(*) FROM information_schema.key_column_usage kcu + JOIN information_schema.table_constraints tc + ON tc.constraint_name = kcu.constraint_name + AND tc.table_schema = kcu.table_schema + WHERE tc.constraint_type = 'FOREIGN KEY' + AND kcu.table_name = ? + AND kcu.column_name = ? + `, table, column).Scan(&count) + case "mysql": + db.Raw(` + SELECT COUNT(*) FROM information_schema.key_column_usage + WHERE table_schema = DATABASE() + AND table_name = ? + AND column_name = ? + AND referenced_table_name IS NOT NULL + `, table, column).Scan(&count) + default: // sqlite + type fkInfo struct { + From string + } + var fks []fkInfo + db.Raw(fmt.Sprintf("PRAGMA foreign_key_list(%s)", table)).Scan(&fks) + for _, fk := range fks { + if fk.From == column { + return true + } + } + return false + } + + return count > 0 +} + +// CleanupOrphanedResources deletes rows from the table of model T where the foreign +// key column (fkColumn) references a row in the table of model R that no longer exists. 
+func CleanupOrphanedResources[T any, R any](ctx context.Context, db *gorm.DB, fkColumn string) error { + var model T + var refModel R + + if !db.Migrator().HasTable(&model) { + log.WithContext(ctx).Debugf("table for %T does not exist, no cleanup needed", model) + return nil + } + + if !db.Migrator().HasTable(&refModel) { + log.WithContext(ctx).Debugf("referenced table for %T does not exist, no cleanup needed", refModel) + return nil + } + + stmtT := &gorm.Statement{DB: db} + if err := stmtT.Parse(&model); err != nil { + return fmt.Errorf("parse model %T: %w", model, err) + } + childTable := stmtT.Schema.Table + + stmtR := &gorm.Statement{DB: db} + if err := stmtR.Parse(&refModel); err != nil { + return fmt.Errorf("parse reference model %T: %w", refModel, err) + } + parentTable := stmtR.Schema.Table + + if !db.Migrator().HasColumn(&model, fkColumn) { + log.WithContext(ctx).Debugf("column %s does not exist in table %s, no cleanup needed", fkColumn, childTable) + return nil + } + + // If a foreign key constraint already exists on the column, the DB itself + // enforces referential integrity and orphaned rows cannot exist. 
+ if hasForeignKey(db, childTable, fkColumn) { + log.WithContext(ctx).Debugf("foreign key constraint for %s already exists on %s, no cleanup needed", fkColumn, childTable) + return nil + } + + result := db.Exec( + fmt.Sprintf( + "DELETE FROM %s WHERE %s NOT IN (SELECT id FROM %s)", + childTable, fkColumn, parentTable, + ), + ) + if result.Error != nil { + return fmt.Errorf("cleanup orphaned rows in %s: %w", childTable, result.Error) + } + + log.WithContext(ctx).Infof("Cleaned up %d orphaned rows from %s where %s had no matching row in %s", + result.RowsAffected, childTable, fkColumn, parentTable) + + return nil +} + func RemoveDuplicatePeerKeys(ctx context.Context, db *gorm.DB) error { if !db.Migrator().HasTable("peers") { log.WithContext(ctx).Debug("peers table does not exist, skipping duplicate key cleanup") diff --git a/management/server/migration/migration_test.go b/management/server/migration/migration_test.go index c1be8a3a3..5e00976c2 100644 --- a/management/server/migration/migration_test.go +++ b/management/server/migration/migration_test.go @@ -441,3 +441,197 @@ func TestRemoveDuplicatePeerKeys_NoTable(t *testing.T) { err := migration.RemoveDuplicatePeerKeys(context.Background(), db) require.NoError(t, err, "Should not fail when table does not exist") } + +type testParent struct { + ID string `gorm:"primaryKey"` +} + +func (testParent) TableName() string { + return "test_parents" +} + +type testChild struct { + ID string `gorm:"primaryKey"` + ParentID string +} + +func (testChild) TableName() string { + return "test_children" +} + +type testChildWithFK struct { + ID string `gorm:"primaryKey"` + ParentID string `gorm:"index"` + Parent *testParent `gorm:"foreignKey:ParentID"` +} + +func (testChildWithFK) TableName() string { + return "test_children" +} + +func setupOrphanTestDB(t *testing.T, models ...any) *gorm.DB { + t.Helper() + db := setupDatabase(t) + for _, m := range models { + _ = db.Migrator().DropTable(m) + } + err := db.AutoMigrate(models...) 
+ require.NoError(t, err, "Failed to auto-migrate tables") + return db +} + +func TestCleanupOrphanedResources_NoChildTable(t *testing.T) { + db := setupDatabase(t) + _ = db.Migrator().DropTable(&testChild{}) + _ = db.Migrator().DropTable(&testParent{}) + + err := migration.CleanupOrphanedResources[testChild, testParent](context.Background(), db, "parent_id") + require.NoError(t, err, "Should not fail when child table does not exist") +} + +func TestCleanupOrphanedResources_NoParentTable(t *testing.T) { + db := setupDatabase(t) + _ = db.Migrator().DropTable(&testParent{}) + _ = db.Migrator().DropTable(&testChild{}) + + err := db.AutoMigrate(&testChild{}) + require.NoError(t, err) + + err = migration.CleanupOrphanedResources[testChild, testParent](context.Background(), db, "parent_id") + require.NoError(t, err, "Should not fail when parent table does not exist") +} + +func TestCleanupOrphanedResources_EmptyTables(t *testing.T) { + db := setupOrphanTestDB(t, &testParent{}, &testChild{}) + + err := migration.CleanupOrphanedResources[testChild, testParent](context.Background(), db, "parent_id") + require.NoError(t, err, "Should not fail on empty tables") + + var count int64 + db.Model(&testChild{}).Count(&count) + assert.Equal(t, int64(0), count) +} + +func TestCleanupOrphanedResources_NoOrphans(t *testing.T) { + db := setupOrphanTestDB(t, &testParent{}, &testChild{}) + + require.NoError(t, db.Create(&testParent{ID: "p1"}).Error) + require.NoError(t, db.Create(&testParent{ID: "p2"}).Error) + require.NoError(t, db.Create(&testChild{ID: "c1", ParentID: "p1"}).Error) + require.NoError(t, db.Create(&testChild{ID: "c2", ParentID: "p2"}).Error) + + err := migration.CleanupOrphanedResources[testChild, testParent](context.Background(), db, "parent_id") + require.NoError(t, err) + + var count int64 + db.Model(&testChild{}).Count(&count) + assert.Equal(t, int64(2), count, "All children should remain when no orphans") +} + +func TestCleanupOrphanedResources_AllOrphans(t 
*testing.T) { + db := setupOrphanTestDB(t, &testParent{}, &testChild{}) + + require.NoError(t, db.Exec("INSERT INTO test_children (id, parent_id) VALUES (?, ?)", "c1", "gone1").Error) + require.NoError(t, db.Exec("INSERT INTO test_children (id, parent_id) VALUES (?, ?)", "c2", "gone2").Error) + require.NoError(t, db.Exec("INSERT INTO test_children (id, parent_id) VALUES (?, ?)", "c3", "gone3").Error) + + err := migration.CleanupOrphanedResources[testChild, testParent](context.Background(), db, "parent_id") + require.NoError(t, err) + + var count int64 + db.Model(&testChild{}).Count(&count) + assert.Equal(t, int64(0), count, "All orphaned children should be deleted") +} + +func TestCleanupOrphanedResources_MixedValidAndOrphaned(t *testing.T) { + db := setupOrphanTestDB(t, &testParent{}, &testChild{}) + + require.NoError(t, db.Create(&testParent{ID: "p1"}).Error) + require.NoError(t, db.Create(&testParent{ID: "p2"}).Error) + + require.NoError(t, db.Create(&testChild{ID: "c1", ParentID: "p1"}).Error) + require.NoError(t, db.Create(&testChild{ID: "c2", ParentID: "p2"}).Error) + require.NoError(t, db.Create(&testChild{ID: "c3", ParentID: "p1"}).Error) + + require.NoError(t, db.Exec("INSERT INTO test_children (id, parent_id) VALUES (?, ?)", "c4", "gone1").Error) + require.NoError(t, db.Exec("INSERT INTO test_children (id, parent_id) VALUES (?, ?)", "c5", "gone2").Error) + + err := migration.CleanupOrphanedResources[testChild, testParent](context.Background(), db, "parent_id") + require.NoError(t, err) + + var remaining []testChild + require.NoError(t, db.Order("id").Find(&remaining).Error) + + assert.Len(t, remaining, 3, "Only valid children should remain") + assert.Equal(t, "c1", remaining[0].ID) + assert.Equal(t, "c2", remaining[1].ID) + assert.Equal(t, "c3", remaining[2].ID) +} + +func TestCleanupOrphanedResources_Idempotent(t *testing.T) { + db := setupOrphanTestDB(t, &testParent{}, &testChild{}) + + require.NoError(t, db.Create(&testParent{ID: "p1"}).Error) + 
require.NoError(t, db.Create(&testChild{ID: "c1", ParentID: "p1"}).Error) + require.NoError(t, db.Exec("INSERT INTO test_children (id, parent_id) VALUES (?, ?)", "c2", "gone").Error) + + ctx := context.Background() + + err := migration.CleanupOrphanedResources[testChild, testParent](ctx, db, "parent_id") + require.NoError(t, err) + + var count int64 + db.Model(&testChild{}).Count(&count) + assert.Equal(t, int64(1), count) + + err = migration.CleanupOrphanedResources[testChild, testParent](ctx, db, "parent_id") + require.NoError(t, err) + + db.Model(&testChild{}).Count(&count) + assert.Equal(t, int64(1), count, "Count should remain the same after second run") +} + +func TestCleanupOrphanedResources_SkipsWhenForeignKeyExists(t *testing.T) { + engine := os.Getenv("NETBIRD_STORE_ENGINE") + if engine != "postgres" && engine != "mysql" { + t.Skip("FK constraint early-exit test requires postgres or mysql") + } + + db := setupDatabase(t) + _ = db.Migrator().DropTable(&testChildWithFK{}) + _ = db.Migrator().DropTable(&testParent{}) + + err := db.AutoMigrate(&testParent{}, &testChildWithFK{}) + require.NoError(t, err) + + require.NoError(t, db.Create(&testParent{ID: "p1"}).Error) + require.NoError(t, db.Create(&testParent{ID: "p2"}).Error) + require.NoError(t, db.Create(&testChildWithFK{ID: "c1", ParentID: "p1"}).Error) + require.NoError(t, db.Create(&testChildWithFK{ID: "c2", ParentID: "p2"}).Error) + + switch engine { + case "postgres": + require.NoError(t, db.Exec("ALTER TABLE test_children DROP CONSTRAINT fk_test_children_parent").Error) + require.NoError(t, db.Exec("DELETE FROM test_parents WHERE id = ?", "p2").Error) + require.NoError(t, db.Exec( + "ALTER TABLE test_children ADD CONSTRAINT fk_test_children_parent "+ + "FOREIGN KEY (parent_id) REFERENCES test_parents(id) NOT VALID", + ).Error) + case "mysql": + require.NoError(t, db.Exec("SET FOREIGN_KEY_CHECKS = 0").Error) + require.NoError(t, db.Exec("ALTER TABLE test_children DROP FOREIGN KEY 
fk_test_children_parent").Error) + require.NoError(t, db.Exec("DELETE FROM test_parents WHERE id = ?", "p2").Error) + require.NoError(t, db.Exec( + "ALTER TABLE test_children ADD CONSTRAINT fk_test_children_parent "+ + "FOREIGN KEY (parent_id) REFERENCES test_parents(id)", + ).Error) + require.NoError(t, db.Exec("SET FOREIGN_KEY_CHECKS = 1").Error) + } + + err = migration.CleanupOrphanedResources[testChildWithFK, testParent](context.Background(), db, "parent_id") + require.NoError(t, err) + + var count int64 + db.Model(&testChildWithFK{}).Count(&count) + assert.Equal(t, int64(2), count, "Both rows should survive — migration must skip when FK constraint exists") +} diff --git a/management/server/mock_server/account_mock.go b/management/server/mock_server/account_mock.go index afd2021ac..ff369355e 100644 --- a/management/server/mock_server/account_mock.go +++ b/management/server/mock_server/account_mock.go @@ -46,7 +46,7 @@ type MockAccountManager struct { AddPeerFunc func(ctx context.Context, accountID string, setupKey string, userId string, peer *nbpeer.Peer, temporary bool) (*nbpeer.Peer, *types.NetworkMap, []*posture.Checks, error) GetGroupFunc func(ctx context.Context, accountID, groupID, userID string) (*types.Group, error) GetAllGroupsFunc func(ctx context.Context, accountID, userID string) ([]*types.Group, error) - GetGroupByNameFunc func(ctx context.Context, accountID, groupName string) (*types.Group, error) + GetGroupByNameFunc func(ctx context.Context, groupName, accountID, userID string) (*types.Group, error) SaveGroupFunc func(ctx context.Context, accountID, userID string, group *types.Group, create bool) error SaveGroupsFunc func(ctx context.Context, accountID, userID string, groups []*types.Group, create bool) error DeleteGroupFunc func(ctx context.Context, accountID, userId, groupID string) error @@ -406,9 +406,9 @@ func (am *MockAccountManager) AddPeer( } // GetGroupByName mock implementation of GetGroupByName from server.AccountManager interface 
-func (am *MockAccountManager) GetGroupByName(ctx context.Context, accountID, groupName string) (*types.Group, error) { +func (am *MockAccountManager) GetGroupByName(ctx context.Context, groupName, accountID, userID string) (*types.Group, error) { if am.GetGroupByNameFunc != nil { - return am.GetGroupByNameFunc(ctx, accountID, groupName) + return am.GetGroupByNameFunc(ctx, groupName, accountID, userID) } return nil, status.Errorf(codes.Unimplemented, "method GetGroupByName is not implemented") } diff --git a/management/server/peer.go b/management/server/peer.go index 78ecbfcae..a02e34e0d 100644 --- a/management/server/peer.go +++ b/management/server/peer.go @@ -249,7 +249,7 @@ func (am *DefaultAccountManager) UpdatePeer(ctx context.Context, accountID, user if err != nil { newLabel = "" } else { - _, err := transaction.GetPeerIdByLabel(ctx, store.LockingStrengthNone, accountID, update.Name) + _, err := transaction.GetPeerIdByLabel(ctx, store.LockingStrengthNone, accountID, newLabel) if err == nil { newLabel = "" } @@ -859,7 +859,9 @@ func (am *DefaultAccountManager) AddPeer(ctx context.Context, accountID, setupKe opEvent.Meta["setup_key_name"] = peerAddConfig.SetupKeyName } - am.StoreEvent(ctx, opEvent.InitiatorID, opEvent.TargetID, opEvent.AccountID, opEvent.Activity, opEvent.Meta) + if !temporary { + am.StoreEvent(ctx, opEvent.InitiatorID, opEvent.TargetID, opEvent.AccountID, opEvent.Activity, opEvent.Meta) + } if err := am.networkMapController.OnPeersAdded(ctx, accountID, []string{newPeer.ID}); err != nil { log.WithContext(ctx).Errorf("failed to update network map cache for peer %s: %v", newPeer.ID, err) @@ -1480,9 +1482,11 @@ func deletePeers(ctx context.Context, am *DefaultAccountManager, transaction sto if err = transaction.DeletePeer(ctx, accountID, peer.ID); err != nil { return nil, err } - peerDeletedEvents = append(peerDeletedEvents, func() { - am.StoreEvent(ctx, userID, peer.ID, accountID, activity.PeerRemovedByUser, peer.EventMeta(dnsDomain)) - }) + if 
!(peer.ProxyMeta.Embedded || peer.Meta.KernelVersion == "wasm") { + peerDeletedEvents = append(peerDeletedEvents, func() { + am.StoreEvent(ctx, userID, peer.ID, accountID, activity.PeerRemovedByUser, peer.EventMeta(dnsDomain)) + }) + } } return peerDeletedEvents, nil diff --git a/management/server/peer_test.go b/management/server/peer_test.go index b17757ffd..51c16d730 100644 --- a/management/server/peer_test.go +++ b/management/server/peer_test.go @@ -37,6 +37,7 @@ import ( "github.com/netbirdio/netbird/management/server/job" "github.com/netbirdio/netbird/management/server/permissions" "github.com/netbirdio/netbird/management/server/settings" + "github.com/netbirdio/netbird/shared/auth" "github.com/netbirdio/netbird/shared/management/status" "github.com/netbirdio/netbird/management/server/util" @@ -2738,3 +2739,70 @@ func TestProcessPeerAddAuth(t *testing.T) { assert.Empty(t, config.GroupsToAdd) }) } + +func TestUpdatePeer_DnsLabelCollisionWithFQDN(t *testing.T) { + manager, _, err := createManager(t) + require.NoError(t, err, "unable to create account manager") + + accountID, err := manager.GetAccountIDByUserID(context.Background(), auth.UserAuth{UserId: userID}) + require.NoError(t, err, "unable to create an account") + + // Add first peer with hostname that produces DNS label "netbird1" + key1, err := wgtypes.GenerateKey() + require.NoError(t, err) + peer1, _, _, err := manager.AddPeer(context.Background(), "", "", userID, &nbpeer.Peer{ + Key: key1.PublicKey().String(), + Meta: nbpeer.PeerSystemMeta{Hostname: "netbird1.netbird.cloud"}, + }, false) + require.NoError(t, err, "unable to add first peer") + assert.Equal(t, "netbird1", peer1.DNSLabel) + + // Add second peer with a different hostname + key2, err := wgtypes.GenerateKey() + require.NoError(t, err) + peer2, _, _, err := manager.AddPeer(context.Background(), "", "", userID, &nbpeer.Peer{ + Key: key2.PublicKey().String(), + Meta: nbpeer.PeerSystemMeta{Hostname: "ip-10-29-5-130"}, + }, false) + 
require.NoError(t, err) + + update := peer2.Copy() + update.Name = "netbird1.demo.netbird.cloud" + updated, err := manager.UpdatePeer(context.Background(), accountID, userID, update) + require.NoError(t, err, "renaming peer should not fail with duplicate DNS label error") + assert.Equal(t, "netbird1.demo.netbird.cloud", updated.Name) + assert.NotEqual(t, "netbird1", updated.DNSLabel, "DNS label should not collide with existing peer") + assert.Contains(t, updated.DNSLabel, "netbird1-", "DNS label should be IP-based fallback") +} + +func TestUpdatePeer_DnsLabelUniqueName(t *testing.T) { + manager, _, err := createManager(t) + require.NoError(t, err, "unable to create account manager") + + accountID, err := manager.GetAccountIDByUserID(context.Background(), auth.UserAuth{UserId: userID}) + require.NoError(t, err, "unable to create an account") + + key1, err := wgtypes.GenerateKey() + require.NoError(t, err) + peer1, _, _, err := manager.AddPeer(context.Background(), "", "", userID, &nbpeer.Peer{ + Key: key1.PublicKey().String(), + Meta: nbpeer.PeerSystemMeta{Hostname: "web-server"}, + }, false) + require.NoError(t, err) + assert.Equal(t, "web-server", peer1.DNSLabel) + + // Add second peer and rename it to a unique FQDN whose first label doesn't collide + key2, err := wgtypes.GenerateKey() + require.NoError(t, err) + peer2, _, _, err := manager.AddPeer(context.Background(), "", "", userID, &nbpeer.Peer{ + Key: key2.PublicKey().String(), + Meta: nbpeer.PeerSystemMeta{Hostname: "old-name"}, + }, false) + require.NoError(t, err) + + update := peer2.Copy() + update.Name = "api-server.example.com" + updated, err := manager.UpdatePeer(context.Background(), accountID, userID, update) + require.NoError(t, err, "renaming to unique FQDN should succeed") + assert.Equal(t, "api-server", updated.DNSLabel, "DNS label should be first label of FQDN") +} diff --git a/management/server/posture_checks.go b/management/server/posture_checks.go index ba901c771..9562487c0 100644 --- 
a/management/server/posture_checks.go +++ b/management/server/posture_checks.go @@ -84,7 +84,7 @@ func (am *DefaultAccountManager) SavePostureChecks(ctx context.Context, accountI // DeletePostureChecks deletes a posture check by ID. func (am *DefaultAccountManager) DeletePostureChecks(ctx context.Context, accountID, postureChecksID, userID string) error { - allowed, err := am.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Routes, operations.Read) + allowed, err := am.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Policies, operations.Delete) if err != nil { return status.NewPermissionValidationError(err) } diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index 59e909252..c2bb6520b 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -397,6 +397,11 @@ func (s *SqlStore) DeleteAccount(ctx context.Context, account *types.Account) er return result.Error } + result = tx.Select(clause.Associations).Delete(account.Services, "account_id = ?", account.Id) + if result.Error != nil { + return result.Error + } + result = tx.Select(clause.Associations).Delete(account) if result.Error != nil { return result.Error @@ -2081,7 +2086,8 @@ func (s *SqlStore) getPostureChecks(ctx context.Context, accountID string) ([]*p func (s *SqlStore) getServices(ctx context.Context, accountID string) ([]*rpservice.Service, error) { const serviceQuery = `SELECT id, account_id, name, domain, enabled, auth, meta_created_at, meta_certificate_issued_at, meta_status, proxy_cluster, - pass_host_header, rewrite_redirects, session_private_key, session_public_key + pass_host_header, rewrite_redirects, session_private_key, session_public_key, + mode, listen_port, port_auto_assigned, source, source_peer, terminated FROM services WHERE account_id = $1` const targetsQuery = `SELECT id, account_id, service_id, path, host, port, protocol, @@ -2098,6 +2104,9 @@ func (s 
*SqlStore) getServices(ctx context.Context, accountID string) ([]*rpserv var auth []byte var createdAt, certIssuedAt sql.NullTime var status, proxyCluster, sessionPrivateKey, sessionPublicKey sql.NullString + var mode, source, sourcePeer sql.NullString + var terminated, portAutoAssigned sql.NullBool + var listenPort sql.NullInt64 err := row.Scan( &s.ID, &s.AccountID, @@ -2113,6 +2122,12 @@ func (s *SqlStore) getServices(ctx context.Context, accountID string) ([]*rpserv &s.RewriteRedirects, &sessionPrivateKey, &sessionPublicKey, + &mode, + &listenPort, + &portAutoAssigned, + &source, + &sourcePeer, + &terminated, ) if err != nil { return nil, err @@ -2144,7 +2159,24 @@ func (s *SqlStore) getServices(ctx context.Context, accountID string) ([]*rpserv if sessionPublicKey.Valid { s.SessionPublicKey = sessionPublicKey.String } - + if mode.Valid { + s.Mode = mode.String + } + if source.Valid { + s.Source = source.String + } + if sourcePeer.Valid { + s.SourcePeer = sourcePeer.String + } + if terminated.Valid { + s.Terminated = terminated.Bool + } + if portAutoAssigned.Valid { + s.PortAutoAssigned = portAutoAssigned.Bool + } + if listenPort.Valid { + s.ListenPort = uint16(listenPort.Int64) + } s.Targets = []*rpservice.Target{} return &s, nil }) @@ -5518,7 +5550,7 @@ func (s *SqlStore) GetActiveProxyClusterAddresses(ctx context.Context) ([]string result := s.db.WithContext(ctx). Model(&proxy.Proxy{}). - Where("status = ? AND last_seen > ?", proxy.StatusConnected, time.Now().Add(-2*time.Minute)). + Where("status = ? AND last_seen > ?", proxy.StatusConnected, time.Now().Add(-proxyActiveThreshold)). Distinct("cluster_address"). Pluck("cluster_address", &addresses) @@ -5595,7 +5627,7 @@ func (s *SqlStore) GetActiveProxyClusters(ctx context.Context) ([]proxy.Cluster, result := s.db.Model(&proxy.Proxy{}). Select("MIN(id) as id, cluster_address as address, COUNT(*) as connected_proxies"). - Where("status = ? 
AND last_seen > ?", proxy.StatusConnected, time.Now().Add(-2*time.Minute)). + Where("status = ? AND last_seen > ?", proxy.StatusConnected, time.Now().Add(-proxyActiveThreshold)). Group("cluster_address"). Scan(&clusters) @@ -5607,6 +5639,63 @@ func (s *SqlStore) GetActiveProxyClusters(ctx context.Context) ([]proxy.Cluster, return clusters, nil } +// proxyActiveThreshold is the maximum age of a heartbeat for a proxy to be +// considered active. Must be at least 2x the heartbeat interval (1 min). +const proxyActiveThreshold = 2 * time.Minute + +var validCapabilityColumns = map[string]struct{}{ + "supports_custom_ports": {}, + "require_subdomain": {}, +} + +// GetClusterSupportsCustomPorts returns whether any active proxy in the cluster +// supports custom ports. Returns nil when no proxy reported the capability. +func (s *SqlStore) GetClusterSupportsCustomPorts(ctx context.Context, clusterAddr string) *bool { + return s.getClusterCapability(ctx, clusterAddr, "supports_custom_ports") +} + +// GetClusterRequireSubdomain returns whether any active proxy in the cluster +// requires a subdomain. Returns nil when no proxy reported the capability. +func (s *SqlStore) GetClusterRequireSubdomain(ctx context.Context, clusterAddr string) *bool { + return s.getClusterCapability(ctx, clusterAddr, "require_subdomain") +} + +// getClusterCapability returns an aggregated boolean capability for the given +// cluster. It checks active (connected, recently seen) proxies and returns: +// - *true if any proxy in the cluster has the capability set to true, +// - *false if at least one proxy reported but none set it to true, +// - nil if no proxy reported the capability at all. 
+func (s *SqlStore) getClusterCapability(ctx context.Context, clusterAddr, column string) *bool { + if _, ok := validCapabilityColumns[column]; !ok { + log.WithContext(ctx).Errorf("invalid capability column: %s", column) + return nil + } + + var result struct { + HasCapability bool + AnyTrue bool + } + + err := s.db.WithContext(ctx). + Model(&proxy.Proxy{}). + Select("COUNT(CASE WHEN "+column+" IS NOT NULL THEN 1 END) > 0 AS has_capability, "+ + "COALESCE(MAX(CASE WHEN "+column+" = true THEN 1 ELSE 0 END), 0) = 1 AS any_true"). + Where("cluster_address = ? AND status = ? AND last_seen > ?", + clusterAddr, "connected", time.Now().Add(-proxyActiveThreshold)). + Scan(&result).Error + + if err != nil { + log.WithContext(ctx).Errorf("query cluster capability %s for %s: %v", column, clusterAddr, err) + return nil + } + + if !result.HasCapability { + return nil + } + + return &result.AnyTrue +} + // CleanupStaleProxies deletes proxies that haven't sent heartbeat in the specified duration func (s *SqlStore) CleanupStaleProxies(ctx context.Context, inactivityDuration time.Duration) error { cutoffTime := time.Now().Add(-inactivityDuration) @@ -5626,3 +5715,61 @@ func (s *SqlStore) CleanupStaleProxies(ctx context.Context, inactivityDuration t return nil } + +// GetRoutingPeerNetworks returns the distinct network names where the peer is assigned as a routing peer +// in an enabled network router, either directly or via peer groups. +func (s *SqlStore) GetRoutingPeerNetworks(_ context.Context, accountID, peerID string) ([]string, error) { + var routers []*routerTypes.NetworkRouter + if err := s.db.Select("peer, peer_groups, network_id").Where("account_id = ? AND enabled = true", accountID).Find(&routers).Error; err != nil { + return nil, status.Errorf(status.Internal, "failed to get enabled routers: %v", err) + } + + if len(routers) == 0 { + return nil, nil + } + + var groupPeers []types.GroupPeer + if err := s.db.Select("group_id").Where("account_id = ? 
AND peer_id = ?", accountID, peerID).Find(&groupPeers).Error; err != nil { + return nil, status.Errorf(status.Internal, "failed to get peer group memberships: %v", err) + } + + groupSet := make(map[string]struct{}, len(groupPeers)) + for _, gp := range groupPeers { + groupSet[gp.GroupID] = struct{}{} + } + + networkIDs := make(map[string]struct{}) + for _, r := range routers { + if r.Peer == peerID { + networkIDs[r.NetworkID] = struct{}{} + } else if r.Peer == "" { + for _, pg := range r.PeerGroups { + if _, ok := groupSet[pg]; ok { + networkIDs[r.NetworkID] = struct{}{} + break + } + } + } + } + + if len(networkIDs) == 0 { + return nil, nil + } + + ids := make([]string, 0, len(networkIDs)) + for id := range networkIDs { + ids = append(ids, id) + } + + var networks []*networkTypes.Network + if err := s.db.Select("name").Where("account_id = ? AND id IN ?", accountID, ids).Find(&networks).Error; err != nil { + return nil, status.Errorf(status.Internal, "failed to get networks: %v", err) + } + + names := make([]string, 0, len(networks)) + for _, n := range networks { + names = append(names, n.Name) + } + + return names, nil +} diff --git a/management/server/store/sql_store_idp_migration.go b/management/server/store/sql_store_idp_migration.go new file mode 100644 index 000000000..64962845b --- /dev/null +++ b/management/server/store/sql_store_idp_migration.go @@ -0,0 +1,177 @@ +package store + +// This file contains migration-only methods on SqlStore. +// They satisfy the migration.Store interface via duck typing. +// Delete this file when migration tooling is no longer needed. 
+ +import ( + "context" + "fmt" + + log "github.com/sirupsen/logrus" + "gorm.io/gorm" + + "github.com/netbirdio/netbird/management/server/idp/migration" + nbpeer "github.com/netbirdio/netbird/management/server/peer" + "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/shared/management/status" +) + +func (s *SqlStore) CheckSchema(checks []migration.SchemaCheck) []migration.SchemaError { + migrator := s.db.Migrator() + var errs []migration.SchemaError + + for _, check := range checks { + if !migrator.HasTable(check.Table) { + errs = append(errs, migration.SchemaError{Table: check.Table}) + continue + } + for _, col := range check.Columns { + if !migrator.HasColumn(check.Table, col) { + errs = append(errs, migration.SchemaError{Table: check.Table, Column: col}) + } + } + } + + return errs +} + +func (s *SqlStore) ListUsers(ctx context.Context) ([]*types.User, error) { + tx := s.db + var users []*types.User + result := tx.Find(&users) + if result.Error != nil { + log.WithContext(ctx).Errorf("error when listing users from the store: %s", result.Error) + return nil, status.Errorf(status.Internal, "issue listing users from store") + } + + for _, user := range users { + if err := user.DecryptSensitiveData(s.fieldEncrypt); err != nil { + return nil, fmt.Errorf("decrypt user: %w", err) + } + } + + return users, nil +} + +// txDeferFKConstraints defers foreign key constraint checks for the duration of the transaction. +// MySQL is already handled by s.transaction (SET FOREIGN_KEY_CHECKS = 0). +func (s *SqlStore) txDeferFKConstraints(tx *gorm.DB) error { + if s.storeEngine == types.SqliteStoreEngine { + return tx.Exec("PRAGMA defer_foreign_keys = ON").Error + } + + if s.storeEngine != types.PostgresStoreEngine { + return nil + } + + // GORM creates FK constraints as NOT DEFERRABLE by default, so + // SET CONSTRAINTS ALL DEFERRED is a no-op unless we ALTER them first. 
+ err := tx.Exec(` + DO $$ DECLARE r RECORD; + BEGIN + FOR r IN SELECT conname, conrelid::regclass AS tbl + FROM pg_constraint WHERE contype = 'f' AND NOT condeferrable + LOOP + EXECUTE format('ALTER TABLE %s ALTER CONSTRAINT %I DEFERRABLE INITIALLY IMMEDIATE', r.tbl, r.conname); + END LOOP; + END $$ + `).Error + if err != nil { + return fmt.Errorf("make FK constraints deferrable: %w", err) + } + return tx.Exec("SET CONSTRAINTS ALL DEFERRED").Error +} + +// txRestoreFKConstraints reverts FK constraints back to NOT DEFERRABLE after the +// deferred updates are done but before the transaction commits. +func (s *SqlStore) txRestoreFKConstraints(tx *gorm.DB) error { + if s.storeEngine != types.PostgresStoreEngine { + return nil + } + + return tx.Exec(` + DO $$ DECLARE r RECORD; + BEGIN + FOR r IN SELECT conname, conrelid::regclass AS tbl + FROM pg_constraint WHERE contype = 'f' AND condeferrable + LOOP + EXECUTE format('ALTER TABLE %s ALTER CONSTRAINT %I NOT DEFERRABLE', r.tbl, r.conname); + END LOOP; + END $$ + `).Error +} + +func (s *SqlStore) UpdateUserInfo(ctx context.Context, userID, email, name string) error { + user := &types.User{Email: email, Name: name} + if err := user.EncryptSensitiveData(s.fieldEncrypt); err != nil { + return fmt.Errorf("encrypt user info: %w", err) + } + + result := s.db.Model(&types.User{}).Where("id = ?", userID).Updates(map[string]any{ + "email": user.Email, + "name": user.Name, + }) + if result.Error != nil { + log.WithContext(ctx).Errorf("error updating user info for %s: %s", userID, result.Error) + return status.Errorf(status.Internal, "failed to update user info") + } + + return nil +} + +func (s *SqlStore) UpdateUserID(ctx context.Context, accountID, oldUserID, newUserID string) error { + type fkUpdate struct { + model any + column string + where string + } + + updates := []fkUpdate{ + {&types.PersonalAccessToken{}, "user_id", "user_id = ?"}, + {&types.PersonalAccessToken{}, "created_by", "created_by = ?"}, + {&nbpeer.Peer{}, 
"user_id", "user_id = ?"}, + {&types.UserInviteRecord{}, "created_by", "created_by = ?"}, + {&types.Account{}, "created_by", "created_by = ?"}, + {&types.ProxyAccessToken{}, "created_by", "created_by = ?"}, + {&types.Job{}, "triggered_by", "triggered_by = ?"}, + } + + log.Info("Updating user ID in the store") + err := s.transaction(func(tx *gorm.DB) error { + if err := s.txDeferFKConstraints(tx); err != nil { + return err + } + + for _, u := range updates { + if err := tx.Model(u.model).Where(u.where, oldUserID).Update(u.column, newUserID).Error; err != nil { + return fmt.Errorf("update %s: %w", u.column, err) + } + } + + if err := tx.Model(&types.User{}).Where(accountAndIDQueryCondition, accountID, oldUserID).Update("id", newUserID).Error; err != nil { + return fmt.Errorf("update users: %w", err) + } + + return nil + }) + if err != nil { + log.WithContext(ctx).Errorf("failed to update user ID in the store: %s", err) + return status.Errorf(status.Internal, "failed to update user ID in store") + } + + log.Info("Restoring FK constraints") + err = s.transaction(func(tx *gorm.DB) error { + if err := s.txRestoreFKConstraints(tx); err != nil { + return fmt.Errorf("restore FK constraints: %w", err) + } + + return nil + }) + if err != nil { + log.WithContext(ctx).Errorf("failed to restore FK constraints after user ID update: %s", err) + return status.Errorf(status.Internal, "failed to restore FK constraints after user ID update") + } + + return nil +} diff --git a/management/server/store/sql_store_test.go b/management/server/store/sql_store_test.go index bafa63580..8ea6c2ae5 100644 --- a/management/server/store/sql_store_test.go +++ b/management/server/store/sql_store_test.go @@ -22,6 +22,8 @@ import ( "github.com/stretchr/testify/require" nbdns "github.com/netbirdio/netbird/dns" + proxydomain "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain" + rpservice "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" 
"github.com/netbirdio/netbird/management/internals/modules/zones" "github.com/netbirdio/netbird/management/internals/modules/zones/records" resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" @@ -350,6 +352,35 @@ func TestSqlite_DeleteAccount(t *testing.T) { }, } + account.Services = []*rpservice.Service{ + { + ID: "service_id", + AccountID: account.Id, + Name: "test service", + Domain: "svc.example.com", + Enabled: true, + Targets: []*rpservice.Target{ + { + AccountID: account.Id, + ServiceID: "service_id", + Host: "localhost", + Port: 8080, + Protocol: "http", + Enabled: true, + }, + }, + }, + } + + account.Domains = []*proxydomain.Domain{ + { + ID: "domain_id", + Domain: "custom.example.com", + AccountID: account.Id, + Validated: true, + }, + } + err = store.SaveAccount(context.Background(), account) require.NoError(t, err) @@ -411,6 +442,20 @@ func TestSqlite_DeleteAccount(t *testing.T) { require.NoError(t, err, "expecting no error after removing DeleteAccount when searching for network resources") require.Len(t, resources, 0, "expecting no network resources to be found after DeleteAccount") } + + domains, err := store.ListCustomDomains(context.Background(), account.Id) + require.NoError(t, err, "expecting no error after DeleteAccount when searching for custom domains") + require.Len(t, domains, 0, "expecting no custom domains to be found after DeleteAccount") + + var services []*rpservice.Service + err = store.(*SqlStore).db.Model(&rpservice.Service{}).Find(&services, "account_id = ?", account.Id).Error + require.NoError(t, err, "expecting no error after DeleteAccount when searching for services") + require.Len(t, services, 0, "expecting no services to be found after DeleteAccount") + + var targets []*rpservice.Target + err = store.(*SqlStore).db.Model(&rpservice.Target{}).Find(&targets, "account_id = ?", account.Id).Error + require.NoError(t, err, "expecting no error after DeleteAccount when searching for service targets") 
+ require.Len(t, targets, 0, "expecting no service targets to be found after DeleteAccount") } func Test_GetAccount(t *testing.T) { diff --git a/management/server/store/sqlstore_bench_test.go b/management/server/store/sqlstore_bench_test.go index f2abafceb..81c4b33ae 100644 --- a/management/server/store/sqlstore_bench_test.go +++ b/management/server/store/sqlstore_bench_test.go @@ -20,6 +20,7 @@ import ( "github.com/stretchr/testify/assert" nbdns "github.com/netbirdio/netbird/dns" + "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" routerTypes "github.com/netbirdio/netbird/management/server/networks/routers/types" @@ -265,6 +266,7 @@ func setupBenchmarkDB(b testing.TB) (*SqlStore, func(), string) { &nbdns.NameServerGroup{}, &posture.Checks{}, &networkTypes.Network{}, &routerTypes.NetworkRouter{}, &resourceTypes.NetworkResource{}, &types.AccountOnboarding{}, &service.Service{}, &service.Target{}, + &domain.Domain{}, } for i := len(models) - 1; i >= 0; i-- { diff --git a/management/server/store/store.go b/management/server/store/store.go index fbfe62f8d..fd2e13915 100644 --- a/management/server/store/store.go +++ b/management/server/store/store.go @@ -124,7 +124,7 @@ type Store interface { GetAccountGroups(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*types.Group, error) GetResourceGroups(ctx context.Context, lockStrength LockingStrength, accountID, resourceID string) ([]*types.Group, error) GetGroupByID(ctx context.Context, lockStrength LockingStrength, accountID, groupID string) (*types.Group, error) - GetGroupByName(ctx context.Context, lockStrength LockingStrength, groupName, accountID string) (*types.Group, error) + GetGroupByName(ctx context.Context, lockStrength LockingStrength, accountID, groupName string) (*types.Group, error) 
GetGroupsByIDs(ctx context.Context, lockStrength LockingStrength, accountID string, groupIDs []string) (map[string]*types.Group, error) CreateGroups(ctx context.Context, accountID string, groups []*types.Group) error UpdateGroups(ctx context.Context, accountID string, groups []*types.Group) error @@ -292,6 +292,8 @@ type Store interface { GetActiveProxyClusterAddresses(ctx context.Context) ([]string, error) GetActiveProxyClusterAddressesForAccount(ctx context.Context, accountID string) ([]string, error) GetActiveProxyClusters(ctx context.Context) ([]proxy.Cluster, error) + GetClusterSupportsCustomPorts(ctx context.Context, clusterAddr string) *bool + GetClusterRequireSubdomain(ctx context.Context, clusterAddr string) *bool CleanupStaleProxies(ctx context.Context, inactivityDuration time.Duration) error GetProxyByAccountID(ctx context.Context, accountID string) (*proxy.Proxy, error) CountProxiesByAccountID(ctx context.Context, accountID string) (int64, error) @@ -299,6 +301,8 @@ type Store interface { DeleteProxy(ctx context.Context, proxyID string) error GetCustomDomainsCounts(ctx context.Context) (total int64, validated int64, err error) + + GetRoutingPeerNetworks(ctx context.Context, accountID, peerID string) ([]string, error) } const ( @@ -453,6 +457,12 @@ func getMigrationsPreAuto(ctx context.Context) []migrationFunc { func(db *gorm.DB) error { return migration.RemoveDuplicatePeerKeys(ctx, db) }, + func(db *gorm.DB) error { + return migration.CleanupOrphanedResources[rpservice.Service, types.Account](ctx, db, "account_id") + }, + func(db *gorm.DB) error { + return migration.CleanupOrphanedResources[domain.Domain, types.Account](ctx, db, "account_id") + }, } } diff --git a/management/server/store/store_mock.go b/management/server/store/store_mock.go index 39ca9173e..8e14b74b6 100644 --- a/management/server/store/store_mock.go +++ b/management/server/store/store_mock.go @@ -1432,6 +1432,34 @@ func (mr *MockStoreMockRecorder) GetAnyAccountID(ctx interface{}) 
*gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAnyAccountID", reflect.TypeOf((*MockStore)(nil).GetAnyAccountID), ctx) } +// GetClusterRequireSubdomain mocks base method. +func (m *MockStore) GetClusterRequireSubdomain(ctx context.Context, clusterAddr string) *bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetClusterRequireSubdomain", ctx, clusterAddr) + ret0, _ := ret[0].(*bool) + return ret0 +} + +// GetClusterRequireSubdomain indicates an expected call of GetClusterRequireSubdomain. +func (mr *MockStoreMockRecorder) GetClusterRequireSubdomain(ctx, clusterAddr interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClusterRequireSubdomain", reflect.TypeOf((*MockStore)(nil).GetClusterRequireSubdomain), ctx, clusterAddr) +} + +// GetClusterSupportsCustomPorts mocks base method. +func (m *MockStore) GetClusterSupportsCustomPorts(ctx context.Context, clusterAddr string) *bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetClusterSupportsCustomPorts", ctx, clusterAddr) + ret0, _ := ret[0].(*bool) + return ret0 +} + +// GetClusterSupportsCustomPorts indicates an expected call of GetClusterSupportsCustomPorts. +func (mr *MockStoreMockRecorder) GetClusterSupportsCustomPorts(ctx, clusterAddr interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClusterSupportsCustomPorts", reflect.TypeOf((*MockStore)(nil).GetClusterSupportsCustomPorts), ctx, clusterAddr) +} + // GetCustomDomain mocks base method. func (m *MockStore) GetCustomDomain(ctx context.Context, accountID, domainID string) (*domain.Domain, error) { m.ctrl.T.Helper() @@ -1509,18 +1537,18 @@ func (mr *MockStoreMockRecorder) GetGroupByID(ctx, lockStrength, accountID, grou } // GetGroupByName mocks base method. 
-func (m *MockStore) GetGroupByName(ctx context.Context, lockStrength LockingStrength, groupName, accountID string) (*types2.Group, error) { +func (m *MockStore) GetGroupByName(ctx context.Context, lockStrength LockingStrength, accountID, groupName string) (*types2.Group, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetGroupByName", ctx, lockStrength, groupName, accountID) + ret := m.ctrl.Call(m, "GetGroupByName", ctx, lockStrength, accountID, groupName) ret0, _ := ret[0].(*types2.Group) ret1, _ := ret[1].(error) return ret0, ret1 } // GetGroupByName indicates an expected call of GetGroupByName. -func (mr *MockStoreMockRecorder) GetGroupByName(ctx, lockStrength, groupName, accountID interface{}) *gomock.Call { +func (mr *MockStoreMockRecorder) GetGroupByName(ctx, lockStrength, accountID, groupName interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGroupByName", reflect.TypeOf((*MockStore)(nil).GetGroupByName), ctx, lockStrength, groupName, accountID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGroupByName", reflect.TypeOf((*MockStore)(nil).GetGroupByName), ctx, lockStrength, accountID, groupName) } // GetGroupsByIDs mocks base method. @@ -2061,6 +2089,21 @@ func (mr *MockStoreMockRecorder) GetRouteByID(ctx, lockStrength, accountID, rout return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRouteByID", reflect.TypeOf((*MockStore)(nil).GetRouteByID), ctx, lockStrength, accountID, routeID) } +// GetRoutingPeerNetworks mocks base method. +func (m *MockStore) GetRoutingPeerNetworks(ctx context.Context, accountID, peerID string) ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRoutingPeerNetworks", ctx, accountID, peerID) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetRoutingPeerNetworks indicates an expected call of GetRoutingPeerNetworks. 
+func (mr *MockStoreMockRecorder) GetRoutingPeerNetworks(ctx, accountID, peerID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRoutingPeerNetworks", reflect.TypeOf((*MockStore)(nil).GetRoutingPeerNetworks), ctx, accountID, peerID) +} + // GetServiceByDomain mocks base method. func (m *MockStore) GetServiceByDomain(ctx context.Context, domain string) (*service.Service, error) { m.ctrl.T.Helper() diff --git a/management/server/types/account.go b/management/server/types/account.go index 269fc7a88..c448813db 100644 --- a/management/server/types/account.go +++ b/management/server/types/account.go @@ -18,6 +18,7 @@ import ( "github.com/netbirdio/netbird/client/ssh/auth" nbdns "github.com/netbirdio/netbird/dns" + proxydomain "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain" "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service" "github.com/netbirdio/netbird/management/internals/modules/zones" "github.com/netbirdio/netbird/management/internals/modules/zones/records" @@ -101,6 +102,7 @@ type Account struct { DNSSettings DNSSettings `gorm:"embedded;embeddedPrefix:dns_settings_"` PostureChecks []*posture.Checks `gorm:"foreignKey:AccountID;references:id"` Services []*service.Service `gorm:"foreignKey:AccountID;references:id"` + Domains []*proxydomain.Domain `gorm:"foreignKey:AccountID;references:id"` // Settings is a dictionary of Account settings Settings *Settings `gorm:"embedded;embeddedPrefix:settings_"` Networks []*networkTypes.Network `gorm:"foreignKey:AccountID;references:id"` @@ -911,6 +913,11 @@ func (a *Account) Copy() *Account { services = append(services, svc.Copy()) } + domains := []*proxydomain.Domain{} + for _, domain := range a.Domains { + domains = append(domains, domain.Copy()) + } + return &Account{ Id: a.Id, CreatedBy: a.CreatedBy, @@ -936,6 +943,7 @@ func (a *Account) Copy() *Account { Onboarding: a.Onboarding, NetworkMapCache: 
a.NetworkMapCache, nmapInitOnce: a.nmapInitOnce, + Domains: domains, } } diff --git a/management/server/types/account_test.go b/management/server/types/account_test.go index af2896216..00ba29b7f 100644 --- a/management/server/types/account_test.go +++ b/management/server/types/account_test.go @@ -84,6 +84,12 @@ func setupTestAccount() *Account { }, }, Groups: map[string]*Group{ + "groupAll": { + ID: "groupAll", + Name: "All", + Peers: []string{"peer1", "peer2", "peer3", "peer11", "peer12", "peer21", "peer31", "peer32", "peer41", "peer51", "peer61"}, + Issued: GroupIssuedAPI, + }, "group1": { ID: "group1", Peers: []string{"peer11", "peer12"}, diff --git a/management/server/types/networkmap_benchmark_test.go b/management/server/types/networkmap_benchmark_test.go new file mode 100644 index 000000000..38272e7b0 --- /dev/null +++ b/management/server/types/networkmap_benchmark_test.go @@ -0,0 +1,217 @@ +package types_test + +import ( + "context" + "fmt" + "os" + "testing" + + nbdns "github.com/netbirdio/netbird/dns" + "github.com/netbirdio/netbird/management/server/types" +) + +type benchmarkScale struct { + name string + peers int + groups int +} + +var defaultScales = []benchmarkScale{ + {"100peers_5groups", 100, 5}, + {"500peers_20groups", 500, 20}, + {"1000peers_50groups", 1000, 50}, + {"5000peers_100groups", 5000, 100}, + {"10000peers_200groups", 10000, 200}, + {"20000peers_200groups", 20000, 200}, + {"30000peers_300groups", 30000, 300}, +} + +func skipCIBenchmark(b *testing.B) { + if os.Getenv("CI") == "true" { + b.Skip("Skipping benchmark in CI") + } +} + +// ────────────────────────────────────────────────────────────────────────────── +// Single Peer Network Map Generation +// ────────────────────────────────────────────────────────────────────────────── + +// BenchmarkNetworkMapGeneration_Components benchmarks the components-based approach for a single peer. 
+func BenchmarkNetworkMapGeneration_Components(b *testing.B) { + skipCIBenchmark(b) + for _, scale := range defaultScales { + b.Run(scale.name, func(b *testing.B) { + account, validatedPeers := scalableTestAccount(scale.peers, scale.groups) + ctx := context.Background() + resourcePolicies := account.GetResourcePoliciesMap() + routers := account.GetResourceRoutersMap() + groupIDToUserIDs := account.GetActiveGroupUsers() + + b.ReportAllocs() + b.ResetTimer() + for range b.N { + _ = account.GetPeerNetworkMapFromComponents(ctx, "peer-0", nbdns.CustomZone{}, nil, validatedPeers, resourcePolicies, routers, nil, groupIDToUserIDs) + } + }) + } +} + +// ────────────────────────────────────────────────────────────────────────────── +// All Peers (UpdateAccountPeers hot path) +// ────────────────────────────────────────────────────────────────────────────── + +// BenchmarkNetworkMapGeneration_AllPeers benchmarks generating network maps for ALL peers. +func BenchmarkNetworkMapGeneration_AllPeers(b *testing.B) { + skipCIBenchmark(b) + scales := []benchmarkScale{ + {"100peers_5groups", 100, 5}, + {"500peers_20groups", 500, 20}, + {"1000peers_50groups", 1000, 50}, + {"5000peers_100groups", 5000, 100}, + } + + for _, scale := range scales { + account, validatedPeers := scalableTestAccount(scale.peers, scale.groups) + ctx := context.Background() + + peerIDs := make([]string, 0, len(account.Peers)) + for peerID := range account.Peers { + peerIDs = append(peerIDs, peerID) + } + + b.Run("components/"+scale.name, func(b *testing.B) { + resourcePolicies := account.GetResourcePoliciesMap() + routers := account.GetResourceRoutersMap() + groupIDToUserIDs := account.GetActiveGroupUsers() + b.ReportAllocs() + b.ResetTimer() + for range b.N { + for _, peerID := range peerIDs { + _ = account.GetPeerNetworkMapFromComponents(ctx, peerID, nbdns.CustomZone{}, nil, validatedPeers, resourcePolicies, routers, nil, groupIDToUserIDs) + } + } + }) + } +} + +// 
────────────────────────────────────────────────────────────────────────────── +// Sub-operations +// ────────────────────────────────────────────────────────────────────────────── + +// BenchmarkNetworkMapGeneration_ComponentsCreation benchmarks components extraction. +func BenchmarkNetworkMapGeneration_ComponentsCreation(b *testing.B) { + skipCIBenchmark(b) + for _, scale := range defaultScales { + b.Run(scale.name, func(b *testing.B) { + account, validatedPeers := scalableTestAccount(scale.peers, scale.groups) + ctx := context.Background() + resourcePolicies := account.GetResourcePoliciesMap() + routers := account.GetResourceRoutersMap() + groupIDToUserIDs := account.GetActiveGroupUsers() + b.ReportAllocs() + b.ResetTimer() + for range b.N { + _ = account.GetPeerNetworkMapComponents(ctx, "peer-0", nbdns.CustomZone{}, nil, validatedPeers, resourcePolicies, routers, groupIDToUserIDs) + } + }) + } +} + +// BenchmarkNetworkMapGeneration_ComponentsCalculation benchmarks calculation from pre-built components. +func BenchmarkNetworkMapGeneration_ComponentsCalculation(b *testing.B) { + skipCIBenchmark(b) + for _, scale := range defaultScales { + b.Run(scale.name, func(b *testing.B) { + account, validatedPeers := scalableTestAccount(scale.peers, scale.groups) + ctx := context.Background() + resourcePolicies := account.GetResourcePoliciesMap() + routers := account.GetResourceRoutersMap() + groupIDToUserIDs := account.GetActiveGroupUsers() + components := account.GetPeerNetworkMapComponents(ctx, "peer-0", nbdns.CustomZone{}, nil, validatedPeers, resourcePolicies, routers, groupIDToUserIDs) + b.ReportAllocs() + b.ResetTimer() + for range b.N { + _ = types.CalculateNetworkMapFromComponents(ctx, components) + } + }) + } +} + +// BenchmarkNetworkMapGeneration_PrecomputeMaps benchmarks precomputed map costs. 
+func BenchmarkNetworkMapGeneration_PrecomputeMaps(b *testing.B) { + skipCIBenchmark(b) + for _, scale := range defaultScales { + b.Run("ResourcePoliciesMap/"+scale.name, func(b *testing.B) { + account, _ := scalableTestAccount(scale.peers, scale.groups) + b.ReportAllocs() + b.ResetTimer() + for range b.N { + _ = account.GetResourcePoliciesMap() + } + }) + b.Run("ResourceRoutersMap/"+scale.name, func(b *testing.B) { + account, _ := scalableTestAccount(scale.peers, scale.groups) + b.ReportAllocs() + b.ResetTimer() + for range b.N { + _ = account.GetResourceRoutersMap() + } + }) + b.Run("ActiveGroupUsers/"+scale.name, func(b *testing.B) { + account, _ := scalableTestAccount(scale.peers, scale.groups) + b.ReportAllocs() + b.ResetTimer() + for range b.N { + _ = account.GetActiveGroupUsers() + } + }) + } +} + +// ────────────────────────────────────────────────────────────────────────────── +// Scaling Analysis +// ────────────────────────────────────────────────────────────────────────────── + +// BenchmarkNetworkMapGeneration_GroupScaling tests group count impact on performance. +func BenchmarkNetworkMapGeneration_GroupScaling(b *testing.B) { + skipCIBenchmark(b) + groupCounts := []int{1, 5, 20, 50, 100, 200, 500} + for _, numGroups := range groupCounts { + b.Run(fmt.Sprintf("components_%dgroups", numGroups), func(b *testing.B) { + account, validatedPeers := scalableTestAccount(1000, numGroups) + ctx := context.Background() + resourcePolicies := account.GetResourcePoliciesMap() + routers := account.GetResourceRoutersMap() + groupIDToUserIDs := account.GetActiveGroupUsers() + b.ReportAllocs() + b.ResetTimer() + for range b.N { + _ = account.GetPeerNetworkMapFromComponents(ctx, "peer-0", nbdns.CustomZone{}, nil, validatedPeers, resourcePolicies, routers, nil, groupIDToUserIDs) + } + }) + } +} + +// BenchmarkNetworkMapGeneration_PeerScaling tests peer count impact on performance. 
+func BenchmarkNetworkMapGeneration_PeerScaling(b *testing.B) { + skipCIBenchmark(b) + peerCounts := []int{50, 100, 500, 1000, 2000, 5000, 10000, 20000, 30000} + for _, numPeers := range peerCounts { + numGroups := numPeers / 20 + if numGroups < 1 { + numGroups = 1 + } + b.Run(fmt.Sprintf("components_%dpeers", numPeers), func(b *testing.B) { + account, validatedPeers := scalableTestAccount(numPeers, numGroups) + ctx := context.Background() + resourcePolicies := account.GetResourcePoliciesMap() + routers := account.GetResourceRoutersMap() + groupIDToUserIDs := account.GetActiveGroupUsers() + b.ReportAllocs() + b.ResetTimer() + for range b.N { + _ = account.GetPeerNetworkMapFromComponents(ctx, "peer-0", nbdns.CustomZone{}, nil, validatedPeers, resourcePolicies, routers, nil, groupIDToUserIDs) + } + }) + } +} diff --git a/management/server/types/networkmap_components_correctness_test.go b/management/server/types/networkmap_components_correctness_test.go new file mode 100644 index 000000000..5cd41ff10 --- /dev/null +++ b/management/server/types/networkmap_components_correctness_test.go @@ -0,0 +1,1192 @@ +package types_test + +import ( + "context" + "fmt" + "net" + "net/netip" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + nbdns "github.com/netbirdio/netbird/dns" + resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" + routerTypes "github.com/netbirdio/netbird/management/server/networks/routers/types" + networkTypes "github.com/netbirdio/netbird/management/server/networks/types" + nbpeer "github.com/netbirdio/netbird/management/server/peer" + "github.com/netbirdio/netbird/management/server/posture" + "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/route" +) + +// scalableTestAccountWithoutDefaultPolicy creates an account without the blanket "Allow All" policy. 
+// Use this for tests that need to verify feature-specific connectivity in isolation. +func scalableTestAccountWithoutDefaultPolicy(numPeers, numGroups int) (*types.Account, map[string]struct{}) { + return buildScalableTestAccount(numPeers, numGroups, false) +} + +// scalableTestAccount creates a realistic account with a blanket "Allow All" policy +// plus per-group policies, routes, network resources, posture checks, and DNS settings. +func scalableTestAccount(numPeers, numGroups int) (*types.Account, map[string]struct{}) { + return buildScalableTestAccount(numPeers, numGroups, true) +} + +// buildScalableTestAccount is the core builder. When withDefaultPolicy is true it adds +// a blanket group-all <-> group-all allow rule; when false the only policies are the +// per-group ones, so tests can verify feature-specific connectivity in isolation. +func buildScalableTestAccount(numPeers, numGroups int, withDefaultPolicy bool) (*types.Account, map[string]struct{}) { + peers := make(map[string]*nbpeer.Peer, numPeers) + allGroupPeers := make([]string, 0, numPeers) + + for i := range numPeers { + peerID := fmt.Sprintf("peer-%d", i) + ip := net.IP{100, byte(64 + i/65536), byte((i / 256) % 256), byte(i % 256)} + wtVersion := "0.25.0" + if i%2 == 0 { + wtVersion = "0.40.0" + } + + p := &nbpeer.Peer{ + ID: peerID, + IP: ip, + Key: fmt.Sprintf("key-%s", peerID), + DNSLabel: fmt.Sprintf("peer%d", i), + Status: &nbpeer.PeerStatus{Connected: true, LastSeen: time.Now()}, + UserID: "user-admin", + Meta: nbpeer.PeerSystemMeta{WtVersion: wtVersion, GoOS: "linux"}, + } + + if i == numPeers-2 { + p.LoginExpirationEnabled = true + pastTimestamp := time.Now().Add(-2 * time.Hour) + p.LastLogin = &pastTimestamp + } + + peers[peerID] = p + allGroupPeers = append(allGroupPeers, peerID) + } + + groups := make(map[string]*types.Group, numGroups+1) + groups["group-all"] = &types.Group{ID: "group-all", Name: "All", Peers: allGroupPeers} + + peersPerGroup := numPeers / numGroups + if 
peersPerGroup < 1 { + peersPerGroup = 1 + } + + for g := range numGroups { + groupID := fmt.Sprintf("group-%d", g) + groupPeers := make([]string, 0, peersPerGroup) + start := g * peersPerGroup + end := start + peersPerGroup + if end > numPeers { + end = numPeers + } + for i := start; i < end; i++ { + groupPeers = append(groupPeers, fmt.Sprintf("peer-%d", i)) + } + groups[groupID] = &types.Group{ID: groupID, Name: fmt.Sprintf("Group %d", g), Peers: groupPeers} + } + + policies := make([]*types.Policy, 0, numGroups+2) + if withDefaultPolicy { + policies = append(policies, &types.Policy{ + ID: "policy-all", Name: "Default-Allow", Enabled: true, + Rules: []*types.PolicyRule{{ + ID: "rule-all", Name: "Allow All", Enabled: true, Action: types.PolicyTrafficActionAccept, + Protocol: types.PolicyRuleProtocolALL, Bidirectional: true, + Sources: []string{"group-all"}, Destinations: []string{"group-all"}, + }}, + }) + } + + for g := range numGroups { + groupID := fmt.Sprintf("group-%d", g) + dstGroup := fmt.Sprintf("group-%d", (g+1)%numGroups) + policies = append(policies, &types.Policy{ + ID: fmt.Sprintf("policy-%d", g), Name: fmt.Sprintf("Policy %d", g), Enabled: true, + Rules: []*types.PolicyRule{{ + ID: fmt.Sprintf("rule-%d", g), Name: fmt.Sprintf("Rule %d", g), Enabled: true, + Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolTCP, + Bidirectional: true, + Ports: []string{"8080"}, + Sources: []string{groupID}, Destinations: []string{dstGroup}, + }}, + }) + } + + if numGroups >= 2 { + policies = append(policies, &types.Policy{ + ID: "policy-drop", Name: "Drop DB traffic", Enabled: true, + Rules: []*types.PolicyRule{{ + ID: "rule-drop", Name: "Drop DB", Enabled: true, Action: types.PolicyTrafficActionDrop, + Protocol: types.PolicyRuleProtocolTCP, Ports: []string{"5432"}, Bidirectional: true, + Sources: []string{"group-0"}, Destinations: []string{"group-1"}, + }}, + }) + } + + numRoutes := numGroups + if numRoutes > 20 { + numRoutes = 20 + } + routes 
:= make(map[route.ID]*route.Route, numRoutes) + for r := range numRoutes { + routeID := route.ID(fmt.Sprintf("route-%d", r)) + peerIdx := (numPeers / 2) + r + if peerIdx >= numPeers { + peerIdx = numPeers - 1 + } + routePeerID := fmt.Sprintf("peer-%d", peerIdx) + groupID := fmt.Sprintf("group-%d", r%numGroups) + routes[routeID] = &route.Route{ + ID: routeID, + Network: netip.MustParsePrefix(fmt.Sprintf("10.%d.0.0/16", r)), + Peer: peers[routePeerID].Key, + PeerID: routePeerID, + Description: fmt.Sprintf("Route %d", r), + Enabled: true, + PeerGroups: []string{groupID}, + Groups: []string{"group-all"}, + AccessControlGroups: []string{groupID}, + AccountID: "test-account", + } + } + + numResources := numGroups / 2 + if numResources < 1 { + numResources = 1 + } + if numResources > 50 { + numResources = 50 + } + + networkResources := make([]*resourceTypes.NetworkResource, 0, numResources) + networksList := make([]*networkTypes.Network, 0, numResources) + networkRouters := make([]*routerTypes.NetworkRouter, 0, numResources) + + routingPeerStart := numPeers * 3 / 4 + for nr := range numResources { + netID := fmt.Sprintf("net-%d", nr) + resID := fmt.Sprintf("res-%d", nr) + routerPeerIdx := routingPeerStart + nr + if routerPeerIdx >= numPeers { + routerPeerIdx = numPeers - 1 + } + routerPeerID := fmt.Sprintf("peer-%d", routerPeerIdx) + + networksList = append(networksList, &networkTypes.Network{ID: netID, Name: fmt.Sprintf("Network %d", nr), AccountID: "test-account"}) + networkResources = append(networkResources, &resourceTypes.NetworkResource{ + ID: resID, NetworkID: netID, AccountID: "test-account", Enabled: true, + Address: fmt.Sprintf("svc-%d.netbird.cloud", nr), + }) + networkRouters = append(networkRouters, &routerTypes.NetworkRouter{ + ID: fmt.Sprintf("router-%d", nr), NetworkID: netID, Peer: routerPeerID, + Enabled: true, AccountID: "test-account", + }) + + policies = append(policies, &types.Policy{ + ID: fmt.Sprintf("policy-res-%d", nr), Name: 
fmt.Sprintf("Resource Policy %d", nr), Enabled: true, + SourcePostureChecks: []string{"posture-check-ver"}, + Rules: []*types.PolicyRule{{ + ID: fmt.Sprintf("rule-res-%d", nr), Name: fmt.Sprintf("Allow Resource %d", nr), Enabled: true, + Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolALL, Bidirectional: true, + Sources: []string{fmt.Sprintf("group-%d", nr%numGroups)}, + DestinationResource: types.Resource{ID: resID}, + }}, + }) + } + + account := &types.Account{ + Id: "test-account", + Peers: peers, + Groups: groups, + Policies: policies, + Routes: routes, + Users: map[string]*types.User{ + "user-admin": {Id: "user-admin", Role: types.UserRoleAdmin, IsServiceUser: false, AccountID: "test-account"}, + }, + Network: &types.Network{ + Identifier: "net-test", Net: net.IPNet{IP: net.IP{100, 64, 0, 0}, Mask: net.CIDRMask(10, 32)}, Serial: 1, + }, + DNSSettings: types.DNSSettings{DisabledManagementGroups: []string{}}, + NameServerGroups: map[string]*nbdns.NameServerGroup{ + "ns-group-main": { + ID: "ns-group-main", Name: "Main NS", Enabled: true, Groups: []string{"group-all"}, + NameServers: []nbdns.NameServer{{IP: netip.MustParseAddr("8.8.8.8"), NSType: nbdns.UDPNameServerType, Port: 53}}, + }, + }, + PostureChecks: []*posture.Checks{ + {ID: "posture-check-ver", Name: "Check version", Checks: posture.ChecksDefinition{ + NBVersionCheck: &posture.NBVersionCheck{MinVersion: "0.26.0"}, + }}, + }, + NetworkResources: networkResources, + Networks: networksList, + NetworkRouters: networkRouters, + Settings: &types.Settings{PeerLoginExpirationEnabled: true, PeerLoginExpiration: 1 * time.Hour}, + } + + for _, p := range account.Policies { + p.AccountID = account.Id + } + for _, r := range account.Routes { + r.AccountID = account.Id + } + + validatedPeers := make(map[string]struct{}, numPeers) + for i := range numPeers { + peerID := fmt.Sprintf("peer-%d", i) + if i != numPeers-1 { + validatedPeers[peerID] = struct{}{} + } + } + + return account, 
validatedPeers +} + +// componentsNetworkMap is a convenience wrapper for GetPeerNetworkMapFromComponents. +func componentsNetworkMap(account *types.Account, peerID string, validatedPeers map[string]struct{}) *types.NetworkMap { + return account.GetPeerNetworkMapFromComponents( + context.Background(), peerID, nbdns.CustomZone{}, nil, + validatedPeers, account.GetResourcePoliciesMap(), account.GetResourceRoutersMap(), + nil, account.GetActiveGroupUsers(), + ) +} + +// ────────────────────────────────────────────────────────────────────────────── +// 1. PEER VISIBILITY & GROUPS +// ────────────────────────────────────────────────────────────────────────────── + +func TestComponents_PeerVisibility(t *testing.T) { + account, validatedPeers := scalableTestAccount(100, 5) + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + assert.Equal(t, len(validatedPeers)-1-len(nm.OfflinePeers), len(nm.Peers), "peer should see all other validated non-expired peers") +} + +func TestComponents_PeerDoesNotSeeItself(t *testing.T) { + account, validatedPeers := scalableTestAccount(50, 5) + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + for _, p := range nm.Peers { + assert.NotEqual(t, "peer-0", p.ID, "peer should not see itself") + } +} + +func TestComponents_IntraGroupConnectivity(t *testing.T) { + account, validatedPeers := scalableTestAccount(20, 2) + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + + peerIDs := make(map[string]bool, len(nm.Peers)) + for _, p := range nm.Peers { + peerIDs[p.ID] = true + } + assert.True(t, peerIDs["peer-5"], "peer-0 should see peer-5 from same group") +} + +func TestComponents_CrossGroupConnectivity(t *testing.T) { + // Without default policy, only per-group policies provide connectivity + account, validatedPeers := scalableTestAccountWithoutDefaultPolicy(20, 2) + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + 
require.NotNil(t, nm) + + peerIDs := make(map[string]bool, len(nm.Peers)) + for _, p := range nm.Peers { + peerIDs[p.ID] = true + } + assert.True(t, peerIDs["peer-10"], "peer-0 should see peer-10 from cross-group policy") +} + +func TestComponents_BidirectionalPolicy(t *testing.T) { + // Without default policy so bidirectional visibility comes only from per-group policies + account, validatedPeers := scalableTestAccountWithoutDefaultPolicy(100, 5) + nm0 := componentsNetworkMap(account, "peer-0", validatedPeers) + nm20 := componentsNetworkMap(account, "peer-20", validatedPeers) + require.NotNil(t, nm0) + require.NotNil(t, nm20) + + peer0SeesPeer20 := false + for _, p := range nm0.Peers { + if p.ID == "peer-20" { + peer0SeesPeer20 = true + } + } + peer20SeesPeer0 := false + for _, p := range nm20.Peers { + if p.ID == "peer-0" { + peer20SeesPeer0 = true + } + } + assert.True(t, peer0SeesPeer20, "peer-0 should see peer-20 via bidirectional policy") + assert.True(t, peer20SeesPeer0, "peer-20 should see peer-0 via bidirectional policy") +} + +// ────────────────────────────────────────────────────────────────────────────── +// 2. 
PEER EXPIRATION & ACCOUNT SETTINGS +// ────────────────────────────────────────────────────────────────────────────── + +func TestComponents_ExpiredPeerInOfflineList(t *testing.T) { + account, validatedPeers := scalableTestAccount(100, 5) + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + + offlineIDs := make(map[string]bool, len(nm.OfflinePeers)) + for _, p := range nm.OfflinePeers { + offlineIDs[p.ID] = true + } + assert.True(t, offlineIDs["peer-98"], "expired peer should be in OfflinePeers") + for _, p := range nm.Peers { + assert.NotEqual(t, "peer-98", p.ID, "expired peer should not be in active Peers") + } +} + +func TestComponents_ExpirationDisabledSetting(t *testing.T) { + account, validatedPeers := scalableTestAccount(100, 5) + account.Settings.PeerLoginExpirationEnabled = false + + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + + peerIDs := make(map[string]bool, len(nm.Peers)) + for _, p := range nm.Peers { + peerIDs[p.ID] = true + } + assert.True(t, peerIDs["peer-98"], "with expiration disabled, peer-98 should be in active Peers") +} + +func TestComponents_LoginExpiration_PeerLevel(t *testing.T) { + account, validatedPeers := scalableTestAccount(20, 2) + account.Settings.PeerLoginExpirationEnabled = true + account.Settings.PeerLoginExpiration = 1 * time.Hour + + pastLogin := time.Now().Add(-2 * time.Hour) + account.Peers["peer-5"].LastLogin = &pastLogin + account.Peers["peer-5"].LoginExpirationEnabled = true + + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + + offlineIDs := make(map[string]bool, len(nm.OfflinePeers)) + for _, p := range nm.OfflinePeers { + offlineIDs[p.ID] = true + } + assert.True(t, offlineIDs["peer-5"], "login-expired peer should be in OfflinePeers") + for _, p := range nm.Peers { + assert.NotEqual(t, "peer-5", p.ID, "login-expired peer should not be in active Peers") + } +} + +func TestComponents_NetworkSerial(t 
*testing.T) { + account, validatedPeers := scalableTestAccount(50, 5) + account.Network.Serial = 42 + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + assert.Equal(t, uint64(42), nm.Network.Serial, "network serial should match") +} + +// ────────────────────────────────────────────────────────────────────────────── +// 3. NON-VALIDATED PEERS +// ────────────────────────────────────────────────────────────────────────────── + +func TestComponents_NonValidatedPeerExcluded(t *testing.T) { + account, validatedPeers := scalableTestAccount(100, 5) + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + for _, p := range nm.Peers { + assert.NotEqual(t, "peer-99", p.ID, "non-validated peer should not appear in Peers") + } + for _, p := range nm.OfflinePeers { + assert.NotEqual(t, "peer-99", p.ID, "non-validated peer should not appear in OfflinePeers") + } +} + +func TestComponents_NonValidatedTargetPeerGetsEmptyMap(t *testing.T) { + account, validatedPeers := scalableTestAccount(100, 5) + nm := componentsNetworkMap(account, "peer-99", validatedPeers) + require.NotNil(t, nm) + assert.Empty(t, nm.Peers) + assert.Empty(t, nm.FirewallRules) +} + +func TestComponents_NonExistentPeerGetsEmptyMap(t *testing.T) { + account, validatedPeers := scalableTestAccount(100, 5) + nm := componentsNetworkMap(account, "peer-does-not-exist", validatedPeers) + require.NotNil(t, nm) + assert.Empty(t, nm.Peers) + assert.Empty(t, nm.FirewallRules) +} + +// ────────────────────────────────────────────────────────────────────────────── +// 4. 
POLICIES & FIREWALL RULES +// ────────────────────────────────────────────────────────────────────────────── + +func TestComponents_FirewallRulesGenerated(t *testing.T) { + account, validatedPeers := scalableTestAccount(100, 5) + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + assert.NotEmpty(t, nm.FirewallRules, "should have firewall rules from policies") +} + +func TestComponents_DropPolicyGeneratesDropRules(t *testing.T) { + account, validatedPeers := scalableTestAccount(100, 5) + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + + hasDropRule := false + for _, rule := range nm.FirewallRules { + if rule.Action == string(types.PolicyTrafficActionDrop) { + hasDropRule = true + break + } + } + assert.True(t, hasDropRule, "should have at least one drop firewall rule") +} + +func TestComponents_DisabledPolicyIgnored(t *testing.T) { + account, validatedPeers := scalableTestAccount(50, 2) + for _, p := range account.Policies { + p.Enabled = false + } + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + assert.Empty(t, nm.Peers, "disabled policies should yield no peers") + assert.Empty(t, nm.FirewallRules, "disabled policies should yield no firewall rules") +} + +func TestComponents_PortPolicy(t *testing.T) { + account, validatedPeers := scalableTestAccount(50, 2) + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + + has8080, has5432 := false, false + for _, rule := range nm.FirewallRules { + if rule.Port == "8080" { + has8080 = true + } + if rule.Port == "5432" { + has5432 = true + } + } + assert.True(t, has8080, "should have firewall rule for port 8080") + assert.True(t, has5432, "should have firewall rule for port 5432 (drop policy)") +} + +func TestComponents_PortRangePolicy(t *testing.T) { + account, validatedPeers := scalableTestAccount(50, 2) + account.Peers["peer-0"].Meta.WtVersion = "0.50.0" + + 
account.Policies = append(account.Policies, &types.Policy{ + ID: "policy-port-range", Name: "Port Range", Enabled: true, AccountID: "test-account", + Rules: []*types.PolicyRule{{ + ID: "rule-port-range", Name: "Port Range Rule", Enabled: true, + Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolTCP, + Bidirectional: true, + PortRanges: []types.RulePortRange{{Start: 8000, End: 9000}}, + Sources: []string{"group-0"}, Destinations: []string{"group-1"}, + }}, + }) + + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + + hasPortRange := false + for _, rule := range nm.FirewallRules { + if rule.PortRange.Start == 8000 && rule.PortRange.End == 9000 { + hasPortRange = true + break + } + } + assert.True(t, hasPortRange, "should have firewall rule with port range 8000-9000") +} + +func TestComponents_FirewallRuleDirection(t *testing.T) { + account, validatedPeers := scalableTestAccount(50, 2) + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + + hasIn, hasOut := false, false + for _, rule := range nm.FirewallRules { + if rule.Direction == types.FirewallRuleDirectionIN { + hasIn = true + } + if rule.Direction == types.FirewallRuleDirectionOUT { + hasOut = true + } + } + assert.True(t, hasIn, "should have inbound firewall rules") + assert.True(t, hasOut, "should have outbound firewall rules") +} + +// ────────────────────────────────────────────────────────────────────────────── +// 5. 
ROUTES +// ────────────────────────────────────────────────────────────────────────────── + +func TestComponents_RoutesIncluded(t *testing.T) { + account, validatedPeers := scalableTestAccount(100, 5) + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + assert.NotEmpty(t, nm.Routes, "should have routes") +} + +func TestComponents_DisabledRouteExcluded(t *testing.T) { + account, validatedPeers := scalableTestAccount(50, 2) + for _, r := range account.Routes { + r.Enabled = false + } + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + for _, r := range nm.Routes { + assert.True(t, r.Enabled, "only enabled routes should appear") + } +} + +func TestComponents_RoutesFirewallRulesForACG(t *testing.T) { + account, validatedPeers := scalableTestAccount(100, 5) + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + assert.NotEmpty(t, nm.RoutesFirewallRules, "should have route firewall rules for access-controlled routes") +} + +func TestComponents_HARouteDeduplication(t *testing.T) { + account, validatedPeers := scalableTestAccount(50, 5) + + haNetwork := netip.MustParsePrefix("172.16.0.0/16") + account.Routes["route-ha-1"] = &route.Route{ + ID: "route-ha-1", Network: haNetwork, PeerID: "peer-10", + Peer: account.Peers["peer-10"].Key, Enabled: true, Metric: 100, + Groups: []string{"group-all"}, PeerGroups: []string{"group-0"}, AccountID: "test-account", + } + account.Routes["route-ha-2"] = &route.Route{ + ID: "route-ha-2", Network: haNetwork, PeerID: "peer-20", + Peer: account.Peers["peer-20"].Key, Enabled: true, Metric: 200, + Groups: []string{"group-all"}, PeerGroups: []string{"group-1"}, AccountID: "test-account", + } + + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + + haRoutes := 0 + for _, r := range nm.Routes { + if r.Network == haNetwork { + haRoutes++ + } + } + // Components deduplicates HA routes with the same HA 
unique ID, returning one entry per HA group + assert.Equal(t, 1, haRoutes, "HA routes with same network should be deduplicated into one entry") +} + +// ────────────────────────────────────────────────────────────────────────────── +// 6. NETWORK RESOURCES & ROUTERS +// ────────────────────────────────────────────────────────────────────────────── + +func TestComponents_NetworkResourceRoutes_RouterPeer(t *testing.T) { + account, validatedPeers := scalableTestAccount(100, 5) + + var routerPeerID string + for _, nr := range account.NetworkRouters { + routerPeerID = nr.Peer + break + } + require.NotEmpty(t, routerPeerID) + + nm := componentsNetworkMap(account, routerPeerID, validatedPeers) + require.NotNil(t, nm) + assert.NotEmpty(t, nm.Peers, "router peer should see source peers") +} + +func TestComponents_NetworkResourceRoutes_SourcePeerSeesRouterPeer(t *testing.T) { + account, validatedPeers := scalableTestAccount(100, 5) + + var routerPeerID string + for _, nr := range account.NetworkRouters { + routerPeerID = nr.Peer + break + } + require.NotEmpty(t, routerPeerID) + + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + + peerIDs := make(map[string]bool, len(nm.Peers)) + for _, p := range nm.Peers { + peerIDs[p.ID] = true + } + assert.True(t, peerIDs[routerPeerID], "source peer should see router peer for network resource") +} + +func TestComponents_DisabledNetworkResourceIgnored(t *testing.T) { + account, validatedPeers := scalableTestAccount(50, 5) + for _, nr := range account.NetworkResources { + nr.Enabled = false + } + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + assert.NotNil(t, nm.Network) +} + +// ────────────────────────────────────────────────────────────────────────────── +// 7. 
POSTURE CHECKS +// ────────────────────────────────────────────────────────────────────────────── + +func TestComponents_PostureCheckFiltering_PassingPeer(t *testing.T) { + account, validatedPeers := scalableTestAccount(100, 5) + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + assert.NotEmpty(t, nm.Routes, "passing peer should have routes including resource routes") +} + +func TestComponents_PostureCheckFiltering_FailingPeer(t *testing.T) { + // peer-0 has version 0.40.0 (passes posture check >= 0.26.0) + // peer-1 has version 0.25.0 (fails posture check >= 0.26.0) + // Resource policies require posture-check-ver, so the failing peer + // should not see the router peer for those resources. + account, validatedPeers := scalableTestAccountWithoutDefaultPolicy(100, 5) + + nm0 := componentsNetworkMap(account, "peer-0", validatedPeers) + nm1 := componentsNetworkMap(account, "peer-1", validatedPeers) + require.NotNil(t, nm0) + require.NotNil(t, nm1) + + // The passing peer should have more peers visible (including resource router peers) + // than the failing peer, because the failing peer is excluded from resource policies. 
+ assert.Greater(t, len(nm0.Peers), len(nm1.Peers), + "passing peer (0.40.0) should see more peers than failing peer (0.25.0) due to posture-gated resource policies") +} + +func TestComponents_MultiplePostureChecks(t *testing.T) { + account, validatedPeers := scalableTestAccountWithoutDefaultPolicy(50, 2) + + // Keep only the posture-gated policy — remove per-group policies so connectivity is isolated + account.Policies = []*types.Policy{} + + // Set kernel version on peers so the OS posture check can evaluate + for _, p := range account.Peers { + p.Meta.KernelVersion = "5.15.0" + } + + account.PostureChecks = append(account.PostureChecks, &posture.Checks{ + ID: "posture-check-os", Name: "Check OS", + Checks: posture.ChecksDefinition{ + OSVersionCheck: &posture.OSVersionCheck{Linux: &posture.MinKernelVersionCheck{MinKernelVersion: "0.0.1"}}, + }, + }) + account.Policies = append(account.Policies, &types.Policy{ + ID: "policy-multi-posture", Name: "Multi Posture", Enabled: true, AccountID: "test-account", + SourcePostureChecks: []string{"posture-check-ver", "posture-check-os"}, + Rules: []*types.PolicyRule{{ + ID: "rule-multi-posture", Name: "Multi Check Rule", Enabled: true, + Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolALL, + Bidirectional: true, + Sources: []string{"group-0"}, Destinations: []string{"group-1"}, + }}, + }) + + // peer-0 (0.40.0, kernel 5.15.0) passes both checks, should see group-1 peers + nm0 := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm0) + assert.NotEmpty(t, nm0.Peers, "peer passing both posture checks should see destination peers") + + // peer-1 (0.25.0, kernel 5.15.0) fails version check, should NOT see group-1 peers + nm1 := componentsNetworkMap(account, "peer-1", validatedPeers) + require.NotNil(t, nm1) + assert.Empty(t, nm1.Peers, + "peer failing posture check should see no peers when posture-gated policy is the only connectivity") +} + +// 
────────────────────────────────────────────────────────────────────────────── +// 8. DNS +// ────────────────────────────────────────────────────────────────────────────── + +func TestComponents_DNSConfigEnabled(t *testing.T) { + account, validatedPeers := scalableTestAccount(100, 5) + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + assert.True(t, nm.DNSConfig.ServiceEnable, "DNS should be enabled") + assert.NotEmpty(t, nm.DNSConfig.NameServerGroups, "should have nameserver groups") +} + +func TestComponents_DNSDisabledByManagementGroup(t *testing.T) { + account, validatedPeers := scalableTestAccount(100, 5) + account.DNSSettings.DisabledManagementGroups = []string{"group-all"} + + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + assert.False(t, nm.DNSConfig.ServiceEnable, "DNS should be disabled for peer in disabled group") +} + +func TestComponents_DNSNameServerGroupDistribution(t *testing.T) { + account, validatedPeers := scalableTestAccount(20, 2) + account.NameServerGroups["ns-group-0"] = &nbdns.NameServerGroup{ + ID: "ns-group-0", Name: "Group 0 NS", Enabled: true, Groups: []string{"group-0"}, + NameServers: []nbdns.NameServer{{IP: netip.MustParseAddr("1.1.1.1"), NSType: nbdns.UDPNameServerType, Port: 53}}, + } + + nm0 := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm0) + hasGroup0NS := false + for _, ns := range nm0.DNSConfig.NameServerGroups { + if ns.ID == "ns-group-0" { + hasGroup0NS = true + } + } + assert.True(t, hasGroup0NS, "peer-0 in group-0 should receive ns-group-0") + + nm10 := componentsNetworkMap(account, "peer-10", validatedPeers) + require.NotNil(t, nm10) + hasGroup0NSForPeer10 := false + for _, ns := range nm10.DNSConfig.NameServerGroups { + if ns.ID == "ns-group-0" { + hasGroup0NSForPeer10 = true + } + } + assert.False(t, hasGroup0NSForPeer10, "peer-10 in group-1 should NOT receive ns-group-0") +} + +func 
TestComponents_DNSCustomZone(t *testing.T) { + account, validatedPeers := scalableTestAccount(20, 2) + customZone := nbdns.CustomZone{ + Domain: "netbird.cloud.", + Records: []nbdns.SimpleRecord{ + {Name: "peer0.netbird.cloud.", Type: 1, Class: "IN", TTL: 300, RData: account.Peers["peer-0"].IP.String()}, + {Name: "peer1.netbird.cloud.", Type: 1, Class: "IN", TTL: 300, RData: account.Peers["peer-1"].IP.String()}, + }, + } + + nm := account.GetPeerNetworkMapFromComponents( + context.Background(), "peer-0", customZone, nil, + validatedPeers, account.GetResourcePoliciesMap(), account.GetResourceRoutersMap(), + nil, account.GetActiveGroupUsers(), + ) + require.NotNil(t, nm) + assert.True(t, nm.DNSConfig.ServiceEnable) +} + +// ────────────────────────────────────────────────────────────────────────────── +// 9. SSH +// ────────────────────────────────────────────────────────────────────────────── + +func TestComponents_SSHPolicy(t *testing.T) { + account, validatedPeers := scalableTestAccount(20, 2) + account.Groups["ssh-users"] = &types.Group{ID: "ssh-users", Name: "SSH Users", Peers: []string{}} + account.Policies = append(account.Policies, &types.Policy{ + ID: "policy-ssh", Name: "SSH Access", Enabled: true, AccountID: "test-account", + Rules: []*types.PolicyRule{{ + ID: "rule-ssh", Name: "Allow SSH", Enabled: true, + Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolNetbirdSSH, + Bidirectional: false, + Sources: []string{"group-0"}, Destinations: []string{"group-1"}, + AuthorizedGroups: map[string][]string{"ssh-users": {"root"}}, + }}, + }) + + nm := componentsNetworkMap(account, "peer-10", validatedPeers) + require.NotNil(t, nm) + assert.True(t, nm.EnableSSH, "SSH should be enabled for destination peer of SSH policy") +} + +func TestComponents_SSHNotEnabledWithoutPolicy(t *testing.T) { + account, validatedPeers := scalableTestAccount(20, 2) + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + 
assert.False(t, nm.EnableSSH, "SSH should not be enabled without SSH policy") +} + +// ────────────────────────────────────────────────────────────────────────────── +// 10. CROSS-PEER CONSISTENCY +// ────────────────────────────────────────────────────────────────────────────── + +// TestComponents_AllPeersGetValidMaps verifies that every validated peer gets a +// non-nil map with a consistent network serial and non-empty peer list. +func TestComponents_AllPeersGetValidMaps(t *testing.T) { + account, validatedPeers := scalableTestAccount(50, 5) + for peerID := range account.Peers { + if _, validated := validatedPeers[peerID]; !validated { + continue + } + nm := componentsNetworkMap(account, peerID, validatedPeers) + require.NotNil(t, nm, "network map should not be nil for %s", peerID) + assert.Equal(t, account.Network.Serial, nm.Network.Serial, "serial mismatch for %s", peerID) + assert.NotEmpty(t, nm.Peers, "validated peer %s should see other peers", peerID) + } +} + +// TestComponents_LargeScaleMapGeneration verifies that components can generate maps +// at larger scales without errors and with consistent output. 
+func TestComponents_LargeScaleMapGeneration(t *testing.T) { + scales := []struct{ peers, groups int }{ + {500, 20}, + {1000, 50}, + } + for _, s := range scales { + t.Run(fmt.Sprintf("%dpeers_%dgroups", s.peers, s.groups), func(t *testing.T) { + account, validatedPeers := scalableTestAccount(s.peers, s.groups) + testPeers := []string{"peer-0", fmt.Sprintf("peer-%d", s.peers/4), fmt.Sprintf("peer-%d", s.peers/2)} + for _, peerID := range testPeers { + nm := componentsNetworkMap(account, peerID, validatedPeers) + require.NotNil(t, nm, "network map should not be nil for %s", peerID) + assert.NotEmpty(t, nm.Peers, "peer %s should see other peers at scale", peerID) + assert.NotEmpty(t, nm.Routes, "peer %s should have routes at scale", peerID) + assert.Equal(t, account.Network.Serial, nm.Network.Serial, "serial mismatch for %s", peerID) + } + }) + } +} + +// ────────────────────────────────────────────────────────────────────────────── +// 11. PEER-AS-RESOURCE POLICIES +// ────────────────────────────────────────────────────────────────────────────── + +// TestComponents_PeerAsSourceResource verifies that a policy with SourceResource.Type=Peer +// targets only that specific peer as the source. 
+func TestComponents_PeerAsSourceResource(t *testing.T) { + account, validatedPeers := scalableTestAccount(20, 2) + + account.Policies = append(account.Policies, &types.Policy{ + ID: "policy-peer-src", Name: "Peer Source Resource", Enabled: true, AccountID: "test-account", + Rules: []*types.PolicyRule{{ + ID: "rule-peer-src", Name: "Peer Source Rule", Enabled: true, + Action: types.PolicyTrafficActionAccept, + Protocol: types.PolicyRuleProtocolTCP, + Bidirectional: true, + Ports: []string{"443"}, + SourceResource: types.Resource{ID: "peer-0", Type: types.ResourceTypePeer}, + Destinations: []string{"group-1"}, + }}, + }) + + // peer-0 is the source resource, should see group-1 peers + nm0 := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm0) + + has443 := false + for _, rule := range nm0.FirewallRules { + if rule.Port == "443" { + has443 = true + break + } + } + assert.True(t, has443, "peer-0 as source resource should have port 443 rule") +} + +// TestComponents_PeerAsDestinationResource verifies that a policy with DestinationResource.Type=Peer +// targets only that specific peer as the destination. 
+func TestComponents_PeerAsDestinationResource(t *testing.T) { + account, validatedPeers := scalableTestAccountWithoutDefaultPolicy(20, 2) + + account.Policies = append(account.Policies, &types.Policy{ + ID: "policy-peer-dst", Name: "Peer Dest Resource", Enabled: true, AccountID: "test-account", + Rules: []*types.PolicyRule{{ + ID: "rule-peer-dst", Name: "Peer Dest Rule", Enabled: true, + Action: types.PolicyTrafficActionAccept, + Protocol: types.PolicyRuleProtocolTCP, + Bidirectional: true, + Ports: []string{"443"}, + Sources: []string{"group-0"}, + DestinationResource: types.Resource{ID: "peer-15", Type: types.ResourceTypePeer}, + }}, + }) + + // peer-0 is in group-0 (source), should see peer-15 as destination + nm0 := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm0) + + peerIDs := make(map[string]bool, len(nm0.Peers)) + for _, p := range nm0.Peers { + peerIDs[p.ID] = true + } + assert.True(t, peerIDs["peer-15"], "peer-0 should see peer-15 via peer-as-destination-resource policy") +} + +// ────────────────────────────────────────────────────────────────────────────── +// 12. MULTIPLE RULES PER POLICY +// ────────────────────────────────────────────────────────────────────────────── + +// TestComponents_MultipleRulesPerPolicy verifies a policy with multiple rules generates +// firewall rules for each. 
+func TestComponents_MultipleRulesPerPolicy(t *testing.T) { + account, validatedPeers := scalableTestAccount(20, 2) + + account.Policies = append(account.Policies, &types.Policy{ + ID: "policy-multi-rule", Name: "Multi Rule Policy", Enabled: true, AccountID: "test-account", + Rules: []*types.PolicyRule{ + { + ID: "rule-http", Name: "Allow HTTP", Enabled: true, + Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolTCP, + Bidirectional: true, Ports: []string{"80"}, + Sources: []string{"group-0"}, Destinations: []string{"group-1"}, + }, + { + ID: "rule-https", Name: "Allow HTTPS", Enabled: true, + Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolTCP, + Bidirectional: true, Ports: []string{"443"}, + Sources: []string{"group-0"}, Destinations: []string{"group-1"}, + }, + }, + }) + + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + + has80, has443 := false, false + for _, rule := range nm.FirewallRules { + if rule.Port == "80" { + has80 = true + } + if rule.Port == "443" { + has443 = true + } + } + assert.True(t, has80, "should have firewall rule for port 80 from first rule") + assert.True(t, has443, "should have firewall rule for port 443 from second rule") +} + +// ────────────────────────────────────────────────────────────────────────────── +// 13. SSH AUTHORIZED USERS CONTENT +// ────────────────────────────────────────────────────────────────────────────── + +// TestComponents_SSHAuthorizedUsersContent verifies that SSH policies populate +// the AuthorizedUsers map with the correct users and machine mappings. 
+func TestComponents_SSHAuthorizedUsersContent(t *testing.T) { + account, validatedPeers := scalableTestAccount(20, 2) + + account.Users["user-dev"] = &types.User{Id: "user-dev", Role: types.UserRoleUser, AccountID: "test-account", AutoGroups: []string{"ssh-users"}} + account.Groups["ssh-users"] = &types.Group{ID: "ssh-users", Name: "SSH Users", Peers: []string{}} + + account.Policies = append(account.Policies, &types.Policy{ + ID: "policy-ssh", Name: "SSH Access", Enabled: true, AccountID: "test-account", + Rules: []*types.PolicyRule{{ + ID: "rule-ssh", Name: "Allow SSH", Enabled: true, + Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolNetbirdSSH, + Bidirectional: false, + Sources: []string{"group-0"}, Destinations: []string{"group-1"}, + AuthorizedGroups: map[string][]string{"ssh-users": {"root", "admin"}}, + }}, + }) + + // peer-10 is in group-1 (destination) + nm := componentsNetworkMap(account, "peer-10", validatedPeers) + require.NotNil(t, nm) + assert.True(t, nm.EnableSSH, "SSH should be enabled") + assert.NotNil(t, nm.AuthorizedUsers, "AuthorizedUsers should not be nil") + assert.NotEmpty(t, nm.AuthorizedUsers, "AuthorizedUsers should have entries") + + // Check that "root" machine user mapping exists + _, hasRoot := nm.AuthorizedUsers["root"] + _, hasAdmin := nm.AuthorizedUsers["admin"] + assert.True(t, hasRoot || hasAdmin, "AuthorizedUsers should contain 'root' or 'admin' machine user mapping") +} + +// TestComponents_SSHLegacyImpliedSSH verifies that a non-SSH ALL protocol policy with +// SSHEnabled peer implies legacy SSH access. 
+func TestComponents_SSHLegacyImpliedSSH(t *testing.T) { + account, validatedPeers := scalableTestAccount(20, 2) + + // Enable SSH on the destination peer + account.Peers["peer-10"].SSHEnabled = true + + // The default "Allow All" policy with Protocol=ALL + SSHEnabled peer should imply SSH + nm := componentsNetworkMap(account, "peer-10", validatedPeers) + require.NotNil(t, nm) + assert.True(t, nm.EnableSSH, "SSH should be implied by ALL protocol policy with SSHEnabled peer") +} + +// ────────────────────────────────────────────────────────────────────────────── +// 14. ROUTE DEFAULT PERMIT (no AccessControlGroups) +// ────────────────────────────────────────────────────────────────────────────── + +// TestComponents_RouteDefaultPermit verifies that a route without AccessControlGroups +// generates default permit firewall rules (0.0.0.0/0 source). +func TestComponents_RouteDefaultPermit(t *testing.T) { + account, validatedPeers := scalableTestAccount(20, 2) + + // Add a route without ACGs — this peer is the routing peer + routingPeerID := "peer-5" + account.Routes["route-no-acg"] = &route.Route{ + ID: "route-no-acg", Network: netip.MustParsePrefix("192.168.99.0/24"), + PeerID: routingPeerID, Peer: account.Peers[routingPeerID].Key, + Enabled: true, Groups: []string{"group-all"}, PeerGroups: []string{"group-0"}, + AccessControlGroups: []string{}, + AccountID: "test-account", + } + + // The routing peer should get default permit route firewall rules + nm := componentsNetworkMap(account, routingPeerID, validatedPeers) + require.NotNil(t, nm) + + hasDefaultPermit := false + for _, rfr := range nm.RoutesFirewallRules { + for _, src := range rfr.SourceRanges { + if src == "0.0.0.0/0" || src == "::/0" { + hasDefaultPermit = true + break + } + } + } + assert.True(t, hasDefaultPermit, "route without ACG should have default permit rule with 0.0.0.0/0 source") +} + +// ────────────────────────────────────────────────────────────────────────────── +// 15. 
MULTIPLE ROUTERS PER NETWORK +// ────────────────────────────────────────────────────────────────────────────── + +// TestComponents_MultipleRoutersPerNetwork verifies that a network resource +// with multiple routers provides routes through all available routers. +func TestComponents_MultipleRoutersPerNetwork(t *testing.T) { + account, validatedPeers := scalableTestAccountWithoutDefaultPolicy(20, 2) + + netID := "net-multi-router" + resID := "res-multi-router" + account.Networks = append(account.Networks, &networkTypes.Network{ID: netID, Name: "Multi Router Network", AccountID: "test-account"}) + account.NetworkResources = append(account.NetworkResources, &resourceTypes.NetworkResource{ + ID: resID, NetworkID: netID, AccountID: "test-account", Enabled: true, + Address: "multi-svc.netbird.cloud", + }) + account.NetworkRouters = append(account.NetworkRouters, + &routerTypes.NetworkRouter{ID: "router-a", NetworkID: netID, Peer: "peer-5", Enabled: true, AccountID: "test-account", Metric: 100}, + &routerTypes.NetworkRouter{ID: "router-b", NetworkID: netID, Peer: "peer-15", Enabled: true, AccountID: "test-account", Metric: 200}, + ) + account.Policies = append(account.Policies, &types.Policy{ + ID: "policy-multi-router-res", Name: "Multi Router Resource", Enabled: true, AccountID: "test-account", + Rules: []*types.PolicyRule{{ + ID: "rule-multi-router-res", Name: "Allow Multi Router", Enabled: true, + Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolALL, Bidirectional: true, + Sources: []string{"group-0"}, DestinationResource: types.Resource{ID: resID}, + }}, + }) + + // peer-0 is in group-0 (source), should see both router peers + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + + peerIDs := make(map[string]bool, len(nm.Peers)) + for _, p := range nm.Peers { + peerIDs[p.ID] = true + } + assert.True(t, peerIDs["peer-5"], "source peer should see router-a (peer-5)") + assert.True(t, peerIDs["peer-15"], 
"source peer should see router-b (peer-15)") +} + +// ────────────────────────────────────────────────────────────────────────────── +// 16. PEER-AS-NAMESERVER EXCLUSION +// ────────────────────────────────────────────────────────────────────────────── + +// TestComponents_PeerIsNameserverExcludedFromNSGroup verifies that a peer serving +// as a nameserver does not receive its own NS group in DNS config. +func TestComponents_PeerIsNameserverExcludedFromNSGroup(t *testing.T) { + account, validatedPeers := scalableTestAccount(20, 2) + + // peer-0 has IP 100.64.0.0 — make it a nameserver + nsIP := account.Peers["peer-0"].IP + account.NameServerGroups["ns-self"] = &nbdns.NameServerGroup{ + ID: "ns-self", Name: "Self NS", Enabled: true, Groups: []string{"group-all"}, + NameServers: []nbdns.NameServer{{IP: netip.AddrFrom4([4]byte{nsIP[0], nsIP[1], nsIP[2], nsIP[3]}), NSType: nbdns.UDPNameServerType, Port: 53}}, + } + + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + + hasSelfNS := false + for _, ns := range nm.DNSConfig.NameServerGroups { + if ns.ID == "ns-self" { + hasSelfNS = true + } + } + assert.False(t, hasSelfNS, "peer serving as nameserver should NOT receive its own NS group") + + // peer-10 is NOT the nameserver, should receive the NS group + nm10 := componentsNetworkMap(account, "peer-10", validatedPeers) + require.NotNil(t, nm10) + hasNSForPeer10 := false + for _, ns := range nm10.DNSConfig.NameServerGroups { + if ns.ID == "ns-self" { + hasNSForPeer10 = true + } + } + assert.True(t, hasNSForPeer10, "non-nameserver peer should receive the NS group") +} + +// ────────────────────────────────────────────────────────────────────────────── +// 17. DOMAIN NETWORK RESOURCES +// ────────────────────────────────────────────────────────────────────────────── + +// TestComponents_DomainNetworkResource verifies that domain-based network resources +// produce routes with the correct domain configuration. 
+func TestComponents_DomainNetworkResource(t *testing.T) { + account, validatedPeers := scalableTestAccountWithoutDefaultPolicy(20, 2) + + netID := "net-domain" + resID := "res-domain" + account.Networks = append(account.Networks, &networkTypes.Network{ID: netID, Name: "Domain Network", AccountID: "test-account"}) + account.NetworkResources = append(account.NetworkResources, &resourceTypes.NetworkResource{ + ID: resID, NetworkID: netID, AccountID: "test-account", Enabled: true, + Address: "api.example.com", Type: "domain", + }) + account.NetworkRouters = append(account.NetworkRouters, &routerTypes.NetworkRouter{ + ID: "router-domain", NetworkID: netID, Peer: "peer-5", Enabled: true, AccountID: "test-account", + }) + account.Policies = append(account.Policies, &types.Policy{ + ID: "policy-domain-res", Name: "Domain Resource Policy", Enabled: true, AccountID: "test-account", + Rules: []*types.PolicyRule{{ + ID: "rule-domain-res", Name: "Allow Domain", Enabled: true, + Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolALL, Bidirectional: true, + Sources: []string{"group-0"}, DestinationResource: types.Resource{ID: resID}, + }}, + }) + + // peer-0 is source, should get route to the domain resource via peer-5 + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + + peerIDs := make(map[string]bool, len(nm.Peers)) + for _, p := range nm.Peers { + peerIDs[p.ID] = true + } + assert.True(t, peerIDs["peer-5"], "source peer should see domain resource router peer") +} + +// ────────────────────────────────────────────────────────────────────────────── +// 18. DISABLED RULE WITHIN ENABLED POLICY +// ────────────────────────────────────────────────────────────────────────────── + +// TestComponents_DisabledRuleInEnabledPolicy verifies that a disabled rule within +// an enabled policy does not generate firewall rules. 
+func TestComponents_DisabledRuleInEnabledPolicy(t *testing.T) { + account, validatedPeers := scalableTestAccount(20, 2) + + account.Policies = append(account.Policies, &types.Policy{ + ID: "policy-mixed-rules", Name: "Mixed Rules", Enabled: true, AccountID: "test-account", + Rules: []*types.PolicyRule{ + { + ID: "rule-enabled", Name: "Enabled Rule", Enabled: true, + Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolTCP, + Bidirectional: true, Ports: []string{"3000"}, + Sources: []string{"group-0"}, Destinations: []string{"group-1"}, + }, + { + ID: "rule-disabled", Name: "Disabled Rule", Enabled: false, + Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolTCP, + Bidirectional: true, Ports: []string{"3001"}, + Sources: []string{"group-0"}, Destinations: []string{"group-1"}, + }, + }, + }) + + nm := componentsNetworkMap(account, "peer-0", validatedPeers) + require.NotNil(t, nm) + + has3000, has3001 := false, false + for _, rule := range nm.FirewallRules { + if rule.Port == "3000" { + has3000 = true + } + if rule.Port == "3001" { + has3001 = true + } + } + assert.True(t, has3000, "enabled rule should generate firewall rule for port 3000") + assert.False(t, has3001, "disabled rule should NOT generate firewall rule for port 3001") +} diff --git a/management/server/user.go b/management/server/user.go index 327aec2d0..c1f984f2f 100644 --- a/management/server/user.go +++ b/management/server/user.go @@ -417,6 +417,10 @@ func (am *DefaultAccountManager) CreatePAT(ctx context.Context, accountID string return nil, err } + if targetUser.AccountID != accountID { + return nil, status.NewPermissionDeniedError() + } + // @note this is essential to prevent non admin users with Pats create permission frpm creating one for a service user if initiatorUserID != targetUserID && !(initiatorUser.HasAdminPower() && targetUser.IsServiceUser) { return nil, status.NewAdminPermissionError() @@ -457,6 +461,10 @@ func (am *DefaultAccountManager) 
DeletePAT(ctx context.Context, accountID string return err } + if targetUser.AccountID != accountID { + return status.NewPermissionDeniedError() + } + if initiatorUserID != targetUserID && !(initiatorUser.HasAdminPower() && targetUser.IsServiceUser) { return status.NewAdminPermissionError() } @@ -496,6 +504,10 @@ func (am *DefaultAccountManager) GetPAT(ctx context.Context, accountID string, i return nil, err } + if targetUser.AccountID != accountID { + return nil, status.NewPermissionDeniedError() + } + if initiatorUserID != targetUserID && !(initiatorUser.HasAdminPower() && targetUser.IsServiceUser) { return nil, status.NewAdminPermissionError() } @@ -523,6 +535,10 @@ func (am *DefaultAccountManager) GetAllPATs(ctx context.Context, accountID strin return nil, err } + if targetUser.AccountID != accountID { + return nil, status.NewPermissionDeniedError() + } + if initiatorUserID != targetUserID && !(initiatorUser.HasAdminPower() && targetUser.IsServiceUser) { return nil, status.NewAdminPermissionError() } @@ -764,9 +780,15 @@ func (am *DefaultAccountManager) processUserUpdate(ctx context.Context, transact updatedUser.Role = update.Role updatedUser.Blocked = update.Blocked updatedUser.AutoGroups = update.AutoGroups - // these two fields can't be set via API, only via direct call to the method + // these fields can't be set via API, only via direct call to the method updatedUser.Issued = update.Issued updatedUser.IntegrationReference = update.IntegrationReference + if update.Name != "" { + updatedUser.Name = update.Name + } + if update.Email != "" { + updatedUser.Email = update.Email + } var transferredOwnerRole bool result, err := handleOwnerRoleTransfer(ctx, transaction, initiatorUser, update) diff --git a/management/server/user_test.go b/management/server/user_test.go index 800d2406c..8fdfbd633 100644 --- a/management/server/user_test.go +++ b/management/server/user_test.go @@ -336,6 +336,104 @@ func TestUser_GetAllPATs(t *testing.T) { assert.Equal(t, 2, len(pats)) 
} +func TestUser_PAT_CrossAccountProtection(t *testing.T) { + const ( + accountAID = "accountA" + accountBID = "accountB" + userAID = "userA" + adminBID = "adminB" + serviceUserBID = "serviceUserB" + regularUserBID = "regularUserB" + tokenBID = "tokenB1" + hashedTokenB = "SoMeHaShEdToKeNB" + ) + + setupStore := func(t *testing.T) (*DefaultAccountManager, func()) { + t.Helper() + + s, cleanup, err := store.NewTestStoreFromSQL(context.Background(), "", t.TempDir()) + require.NoError(t, err, "creating store") + + accountA := newAccountWithId(context.Background(), accountAID, userAID, "", "", "", false) + require.NoError(t, s.SaveAccount(context.Background(), accountA)) + + accountB := newAccountWithId(context.Background(), accountBID, adminBID, "", "", "", false) + accountB.Users[serviceUserBID] = &types.User{ + Id: serviceUserBID, + AccountID: accountBID, + IsServiceUser: true, + ServiceUserName: "svcB", + Role: types.UserRoleAdmin, + PATs: map[string]*types.PersonalAccessToken{ + tokenBID: { + ID: tokenBID, + HashedToken: hashedTokenB, + }, + }, + } + accountB.Users[regularUserBID] = &types.User{ + Id: regularUserBID, + AccountID: accountBID, + Role: types.UserRoleUser, + } + require.NoError(t, s.SaveAccount(context.Background(), accountB)) + + pm := permissions.NewManager(s) + am := &DefaultAccountManager{ + Store: s, + eventStore: &activity.InMemoryEventStore{}, + permissionsManager: pm, + } + return am, cleanup + } + + t.Run("CreatePAT for user in different account is denied", func(t *testing.T) { + am, cleanup := setupStore(t) + t.Cleanup(cleanup) + + _, err := am.CreatePAT(context.Background(), accountAID, userAID, serviceUserBID, "xss-token", 7) + require.Error(t, err, "cross-account CreatePAT must fail") + + _, err = am.CreatePAT(context.Background(), accountAID, userAID, regularUserBID, "xss-token", 7) + require.Error(t, err, "cross-account CreatePAT for regular user must fail") + + _, err = am.CreatePAT(context.Background(), accountBID, adminBID, 
serviceUserBID, "legit-token", 7) + require.NoError(t, err, "same-account CreatePAT should succeed") + }) + + t.Run("DeletePAT for user in different account is denied", func(t *testing.T) { + am, cleanup := setupStore(t) + t.Cleanup(cleanup) + + err := am.DeletePAT(context.Background(), accountAID, userAID, serviceUserBID, tokenBID) + require.Error(t, err, "cross-account DeletePAT must fail") + }) + + t.Run("GetPAT for user in different account is denied", func(t *testing.T) { + am, cleanup := setupStore(t) + t.Cleanup(cleanup) + + _, err := am.GetPAT(context.Background(), accountAID, userAID, serviceUserBID, tokenBID) + require.Error(t, err, "cross-account GetPAT must fail") + }) + + t.Run("GetAllPATs for user in different account is denied", func(t *testing.T) { + am, cleanup := setupStore(t) + t.Cleanup(cleanup) + + _, err := am.GetAllPATs(context.Background(), accountAID, userAID, serviceUserBID) + require.Error(t, err, "cross-account GetAllPATs must fail") + }) + + t.Run("CreatePAT with forged accountID targeting foreign user is denied", func(t *testing.T) { + am, cleanup := setupStore(t) + t.Cleanup(cleanup) + + _, err := am.CreatePAT(context.Background(), accountAID, userAID, adminBID, "forged", 7) + require.Error(t, err, "forged accountID CreatePAT must fail") + }) +} + func TestUser_Copy(t *testing.T) { // this is an imaginary case which will never be in DB this way user := types.User{ diff --git a/proxy/cmd/proxy/main.go b/proxy/cmd/proxy/main.go index 14e540a2e..16e7e8ac2 100644 --- a/proxy/cmd/proxy/main.go +++ b/proxy/cmd/proxy/main.go @@ -1,8 +1,13 @@ package main import ( + "net/http" + // nolint:gosec + _ "net/http/pprof" "runtime" + log "github.com/sirupsen/logrus" + "github.com/netbirdio/netbird/proxy/cmd/proxy/cmd" ) @@ -21,6 +26,9 @@ var ( ) func main() { + go func() { + log.Println(http.ListenAndServe("localhost:6060", nil)) + }() cmd.SetVersionInfo(Version, Commit, BuildDate, GoVersion) cmd.Execute() } diff --git 
a/proxy/internal/auth/middleware_test.go b/proxy/internal/auth/middleware_test.go index a4924d380..6063f070e 100644 --- a/proxy/internal/auth/middleware_test.go +++ b/proxy/internal/auth/middleware_test.go @@ -932,3 +932,71 @@ func TestProtect_HeaderAuth_SubsequentRequestUsesSessionCookie(t *testing.T) { assert.Equal(t, "header-user", capturedData2.GetUserID()) assert.Equal(t, "header", capturedData2.GetAuthMethod()) } + +// TestProtect_HeaderAuth_MultipleValuesSameHeader verifies that the proxy +// correctly handles multiple valid credentials for the same header name. +// In production, the mgmt gRPC authenticateHeader iterates all configured +// header auths and accepts if any hash matches (OR semantics). The proxy +// creates one Header scheme per entry, but a single gRPC call checks all. +func TestProtect_HeaderAuth_MultipleValuesSameHeader(t *testing.T) { + mw := NewMiddleware(log.StandardLogger(), nil, nil) + kp := generateTestKeyPair(t) + + // Mock simulates mgmt behavior: accepts either token-a or token-b. + accepted := map[string]bool{"Bearer token-a": true, "Bearer token-b": true} + mock := &mockAuthenticator{fn: func(_ context.Context, req *proto.AuthenticateRequest) (*proto.AuthenticateResponse, error) { + ha := req.GetHeaderAuth() + if ha != nil && accepted[ha.GetHeaderValue()] { + token, err := sessionkey.SignToken(kp.PrivateKey, "header-user", "example.com", auth.MethodHeader, time.Hour) + require.NoError(t, err) + return &proto.AuthenticateResponse{Success: true, SessionToken: token}, nil + } + return &proto.AuthenticateResponse{Success: false}, nil + }} + + // Single Header scheme (as if one entry existed), but the mock checks both values. 
+ hdr := NewHeader(mock, "svc1", "acc1", "Authorization") + require.NoError(t, mw.AddDomain("example.com", []Scheme{hdr}, kp.PublicKey, time.Hour, "acc1", "svc1", nil)) + + var backendCalled bool + handler := mw.Protect(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + backendCalled = true + w.WriteHeader(http.StatusOK) + })) + + t.Run("first value accepted", func(t *testing.T) { + backendCalled = false + req := httptest.NewRequest(http.MethodGet, "http://example.com/", nil) + req.Header.Set("Authorization", "Bearer token-a") + req = req.WithContext(proxy.WithCapturedData(req.Context(), proxy.NewCapturedData(""))) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusOK, rec.Code) + assert.True(t, backendCalled, "first token should be accepted") + }) + + t.Run("second value accepted", func(t *testing.T) { + backendCalled = false + req := httptest.NewRequest(http.MethodGet, "http://example.com/", nil) + req.Header.Set("Authorization", "Bearer token-b") + req = req.WithContext(proxy.WithCapturedData(req.Context(), proxy.NewCapturedData(""))) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusOK, rec.Code) + assert.True(t, backendCalled, "second token should be accepted") + }) + + t.Run("unknown value rejected", func(t *testing.T) { + backendCalled = false + req := httptest.NewRequest(http.MethodGet, "http://example.com/", nil) + req.Header.Set("Authorization", "Bearer token-c") + req = req.WithContext(proxy.WithCapturedData(req.Context(), proxy.NewCapturedData(""))) + rec := httptest.NewRecorder() + handler.ServeHTTP(rec, req) + + assert.Equal(t, http.StatusUnauthorized, rec.Code) + assert.False(t, backendCalled, "unknown token should be rejected") + }) +} diff --git a/proxy/management_integration_test.go b/proxy/management_integration_test.go index 2f65792f2..cb6039c42 100644 --- a/proxy/management_integration_test.go +++ b/proxy/management_integration_test.go @@ -202,7 
+202,7 @@ func (m *testAccessLogManager) GetAllAccessLogs(_ context.Context, _, _ string, // testProxyManager is a mock implementation of proxy.Manager for testing. type testProxyManager struct{} -func (m *testProxyManager) Connect(_ context.Context, _, _, _ string, _ *string) error { +func (m *testProxyManager) Connect(_ context.Context, _, _, _ string, _ *string, _ *nbproxy.Capabilities) error { return nil } @@ -226,6 +226,14 @@ func (m *testProxyManager) GetActiveClusters(_ context.Context) ([]nbproxy.Clust return nil, nil } +func (m *testProxyManager) ClusterSupportsCustomPorts(_ context.Context, _ string) *bool { + return nil +} + +func (m *testProxyManager) ClusterRequireSubdomain(_ context.Context, _ string) *bool { + return nil +} + func (m *testProxyManager) CleanupStale(_ context.Context, _ time.Duration) error { return nil } @@ -269,14 +277,6 @@ func (c *testProxyController) GetProxiesForCluster(_ string) []string { return nil } -func (c *testProxyController) ClusterSupportsCustomPorts(_ string) *bool { - return nil -} - -func (c *testProxyController) ClusterRequireSubdomain(_ string) *bool { - return nil -} - // storeBackedServiceManager reads directly from the real store. 
type storeBackedServiceManager struct { store store.Store diff --git a/proxy/web/package-lock.json b/proxy/web/package-lock.json index d16196d77..1611323a7 100644 --- a/proxy/web/package-lock.json +++ b/proxy/web/package-lock.json @@ -15,7 +15,7 @@ "tailwind-merge": "^2.6.0" }, "devDependencies": { - "@eslint/js": "^9.39.1", + "@eslint/js": "9.39.2", "@tailwindcss/vite": "^4.1.18", "@types/node": "^24.10.1", "@types/react": "^19.2.5", @@ -29,7 +29,7 @@ "tsx": "^4.21.0", "typescript": "~5.9.3", "typescript-eslint": "^8.46.4", - "vite": "^7.2.4" + "vite": "7.3.2" } }, "node_modules/@babel/code-frame": { @@ -1024,9 +1024,9 @@ "license": "MIT" }, "node_modules/@rollup/rollup-android-arm-eabi": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.57.1.tgz", - "integrity": "sha512-A6ehUVSiSaaliTxai040ZpZ2zTevHYbvu/lDoeAteHI8QnaosIzm4qwtezfRg1jOYaUmnzLX1AOD6Z+UJjtifg==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.60.0.tgz", + "integrity": "sha512-WOhNW9K8bR3kf4zLxbfg6Pxu2ybOUbB2AjMDHSQx86LIF4rH4Ft7vmMwNt0loO0eonglSNy4cpD3MKXXKQu0/A==", "cpu": [ "arm" ], @@ -1038,9 +1038,9 @@ ] }, "node_modules/@rollup/rollup-android-arm64": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.57.1.tgz", - "integrity": "sha512-dQaAddCY9YgkFHZcFNS/606Exo8vcLHwArFZ7vxXq4rigo2bb494/xKMMwRRQW6ug7Js6yXmBZhSBRuBvCCQ3w==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.60.0.tgz", + "integrity": "sha512-u6JHLll5QKRvjciE78bQXDmqRqNs5M/3GVqZeMwvmjaNODJih/WIrJlFVEihvV0MiYFmd+ZyPr9wxOVbPAG2Iw==", "cpu": [ "arm64" ], @@ -1052,9 +1052,9 @@ ] }, "node_modules/@rollup/rollup-darwin-arm64": { - "version": "4.57.1", - "resolved": 
"https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.57.1.tgz", - "integrity": "sha512-crNPrwJOrRxagUYeMn/DZwqN88SDmwaJ8Cvi/TN1HnWBU7GwknckyosC2gd0IqYRsHDEnXf328o9/HC6OkPgOg==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.60.0.tgz", + "integrity": "sha512-qEF7CsKKzSRc20Ciu2Zw1wRrBz4g56F7r/vRwY430UPp/nt1x21Q/fpJ9N5l47WWvJlkNCPJz3QRVw008fi7yA==", "cpu": [ "arm64" ], @@ -1066,9 +1066,9 @@ ] }, "node_modules/@rollup/rollup-darwin-x64": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.57.1.tgz", - "integrity": "sha512-Ji8g8ChVbKrhFtig5QBV7iMaJrGtpHelkB3lsaKzadFBe58gmjfGXAOfI5FV0lYMH8wiqsxKQ1C9B0YTRXVy4w==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.60.0.tgz", + "integrity": "sha512-WADYozJ4QCnXCH4wPB+3FuGmDPoFseVCUrANmA5LWwGmC6FL14BWC7pcq+FstOZv3baGX65tZ378uT6WG8ynTw==", "cpu": [ "x64" ], @@ -1080,9 +1080,9 @@ ] }, "node_modules/@rollup/rollup-freebsd-arm64": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.57.1.tgz", - "integrity": "sha512-R+/WwhsjmwodAcz65guCGFRkMb4gKWTcIeLy60JJQbXrJ97BOXHxnkPFrP+YwFlaS0m+uWJTstrUA9o+UchFug==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.60.0.tgz", + "integrity": "sha512-6b8wGHJlDrGeSE3aH5mGNHBjA0TTkxdoNHik5EkvPHCt351XnigA4pS7Wsj/Eo9Y8RBU6f35cjN9SYmCFBtzxw==", "cpu": [ "arm64" ], @@ -1094,9 +1094,9 @@ ] }, "node_modules/@rollup/rollup-freebsd-x64": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.57.1.tgz", - "integrity": "sha512-IEQTCHeiTOnAUC3IDQdzRAGj3jOAYNr9kBguI7MQAAZK3caezRrg0GxAb6Hchg4lxdZEI5Oq3iov/w/hnFWY9Q==", + "version": "4.60.0", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.60.0.tgz", + "integrity": "sha512-h25Ga0t4jaylMB8M/JKAyrvvfxGRjnPQIR8lnCayyzEjEOx2EJIlIiMbhpWxDRKGKF8jbNH01NnN663dH638mA==", "cpu": [ "x64" ], @@ -1108,9 +1108,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm-gnueabihf": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.57.1.tgz", - "integrity": "sha512-F8sWbhZ7tyuEfsmOxwc2giKDQzN3+kuBLPwwZGyVkLlKGdV1nvnNwYD0fKQ8+XS6hp9nY7B+ZeK01EBUE7aHaw==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.60.0.tgz", + "integrity": "sha512-RzeBwv0B3qtVBWtcuABtSuCzToo2IEAIQrcyB/b2zMvBWVbjo8bZDjACUpnaafaxhTw2W+imQbP2BD1usasK4g==", "cpu": [ "arm" ], @@ -1122,9 +1122,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm-musleabihf": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.57.1.tgz", - "integrity": "sha512-rGfNUfn0GIeXtBP1wL5MnzSj98+PZe/AXaGBCRmT0ts80lU5CATYGxXukeTX39XBKsxzFpEeK+Mrp9faXOlmrw==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.60.0.tgz", + "integrity": "sha512-Sf7zusNI2CIU1HLzuu9Tc5YGAHEZs5Lu7N1ssJG4Tkw6e0MEsN7NdjUDDfGNHy2IU+ENyWT+L2obgWiguWibWQ==", "cpu": [ "arm" ], @@ -1136,9 +1136,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm64-gnu": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.57.1.tgz", - "integrity": "sha512-MMtej3YHWeg/0klK2Qodf3yrNzz6CGjo2UntLvk2RSPlhzgLvYEB3frRvbEF2wRKh1Z2fDIg9KRPe1fawv7C+g==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.60.0.tgz", + "integrity": 
"sha512-DX2x7CMcrJzsE91q7/O02IJQ5/aLkVtYFryqCjduJhUfGKG6yJV8hxaw8pZa93lLEpPTP/ohdN4wFz7yp/ry9A==", "cpu": [ "arm64" ], @@ -1150,9 +1150,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm64-musl": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.57.1.tgz", - "integrity": "sha512-1a/qhaaOXhqXGpMFMET9VqwZakkljWHLmZOX48R0I/YLbhdxr1m4gtG1Hq7++VhVUmf+L3sTAf9op4JlhQ5u1Q==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.60.0.tgz", + "integrity": "sha512-09EL+yFVbJZlhcQfShpswwRZ0Rg+z/CsSELFCnPt3iK+iqwGsI4zht3secj5vLEs957QvFFXnzAT0FFPIxSrkQ==", "cpu": [ "arm64" ], @@ -1164,9 +1164,9 @@ ] }, "node_modules/@rollup/rollup-linux-loong64-gnu": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.57.1.tgz", - "integrity": "sha512-QWO6RQTZ/cqYtJMtxhkRkidoNGXc7ERPbZN7dVW5SdURuLeVU7lwKMpo18XdcmpWYd0qsP1bwKPf7DNSUinhvA==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.60.0.tgz", + "integrity": "sha512-i9IcCMPr3EXm8EQg5jnja0Zyc1iFxJjZWlb4wr7U2Wx/GrddOuEafxRdMPRYVaXjgbhvqalp6np07hN1w9kAKw==", "cpu": [ "loong64" ], @@ -1178,9 +1178,9 @@ ] }, "node_modules/@rollup/rollup-linux-loong64-musl": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.57.1.tgz", - "integrity": "sha512-xpObYIf+8gprgWaPP32xiN5RVTi/s5FCR+XMXSKmhfoJjrpRAjCuuqQXyxUa/eJTdAE6eJ+KDKaoEqjZQxh3Gw==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.60.0.tgz", + "integrity": "sha512-DGzdJK9kyJ+B78MCkWeGnpXJ91tK/iKA6HwHxF4TAlPIY7GXEvMe8hBFRgdrR9Ly4qebR/7gfUs9y2IoaVEyog==", "cpu": [ "loong64" ], @@ -1192,9 +1192,9 @@ ] }, 
"node_modules/@rollup/rollup-linux-ppc64-gnu": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.57.1.tgz", - "integrity": "sha512-4BrCgrpZo4hvzMDKRqEaW1zeecScDCR+2nZ86ATLhAoJ5FQ+lbHVD3ttKe74/c7tNT9c6F2viwB3ufwp01Oh2w==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.60.0.tgz", + "integrity": "sha512-RwpnLsqC8qbS8z1H1AxBA1H6qknR4YpPR9w2XX0vo2Sz10miu57PkNcnHVaZkbqyw/kUWfKMI73jhmfi9BRMUQ==", "cpu": [ "ppc64" ], @@ -1206,9 +1206,9 @@ ] }, "node_modules/@rollup/rollup-linux-ppc64-musl": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.57.1.tgz", - "integrity": "sha512-NOlUuzesGauESAyEYFSe3QTUguL+lvrN1HtwEEsU2rOwdUDeTMJdO5dUYl/2hKf9jWydJrO9OL/XSSf65R5+Xw==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.60.0.tgz", + "integrity": "sha512-Z8pPf54Ly3aqtdWC3G4rFigZgNvd+qJlOE52fmko3KST9SoGfAdSRCwyoyG05q1HrrAblLbk1/PSIV+80/pxLg==", "cpu": [ "ppc64" ], @@ -1220,9 +1220,9 @@ ] }, "node_modules/@rollup/rollup-linux-riscv64-gnu": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.57.1.tgz", - "integrity": "sha512-ptA88htVp0AwUUqhVghwDIKlvJMD/fmL/wrQj99PRHFRAG6Z5nbWoWG4o81Nt9FT+IuqUQi+L31ZKAFeJ5Is+A==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.60.0.tgz", + "integrity": "sha512-3a3qQustp3COCGvnP4SvrMHnPQ9d1vzCakQVRTliaz8cIp/wULGjiGpbcqrkv0WrHTEp8bQD/B3HBjzujVWLOA==", "cpu": [ "riscv64" ], @@ -1234,9 +1234,9 @@ ] }, "node_modules/@rollup/rollup-linux-riscv64-musl": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.57.1.tgz", - "integrity": 
"sha512-S51t7aMMTNdmAMPpBg7OOsTdn4tySRQvklmL3RpDRyknk87+Sp3xaumlatU+ppQ+5raY7sSTcC2beGgvhENfuw==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.60.0.tgz", + "integrity": "sha512-pjZDsVH/1VsghMJ2/kAaxt6dL0psT6ZexQVrijczOf+PeP2BUqTHYejk3l6TlPRydggINOeNRhvpLa0AYpCWSQ==", "cpu": [ "riscv64" ], @@ -1248,9 +1248,9 @@ ] }, "node_modules/@rollup/rollup-linux-s390x-gnu": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.57.1.tgz", - "integrity": "sha512-Bl00OFnVFkL82FHbEqy3k5CUCKH6OEJL54KCyx2oqsmZnFTR8IoNqBF+mjQVcRCT5sB6yOvK8A37LNm/kPJiZg==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.60.0.tgz", + "integrity": "sha512-3ObQs0BhvPgiUVZrN7gqCSvmFuMWvWvsjG5ayJ3Lraqv+2KhOsp+pUbigqbeWqueGIsnn+09HBw27rJ+gYK4VQ==", "cpu": [ "s390x" ], @@ -1262,9 +1262,9 @@ ] }, "node_modules/@rollup/rollup-linux-x64-gnu": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.57.1.tgz", - "integrity": "sha512-ABca4ceT4N+Tv/GtotnWAeXZUZuM/9AQyCyKYyKnpk4yoA7QIAuBt6Hkgpw8kActYlew2mvckXkvx0FfoInnLg==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.60.0.tgz", + "integrity": "sha512-EtylprDtQPdS5rXvAayrNDYoJhIz1/vzN2fEubo3yLE7tfAw+948dO0g4M0vkTVFhKojnF+n6C8bDNe+gDRdTg==", "cpu": [ "x64" ], @@ -1276,9 +1276,9 @@ ] }, "node_modules/@rollup/rollup-linux-x64-musl": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.57.1.tgz", - "integrity": "sha512-HFps0JeGtuOR2convgRRkHCekD7j+gdAuXM+/i6kGzQtFhlCtQkpwtNzkNj6QhCDp7DRJ7+qC/1Vg2jt5iSOFw==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.60.0.tgz", 
+ "integrity": "sha512-k09oiRCi/bHU9UVFqD17r3eJR9bn03TyKraCrlz5ULFJGdJGi7VOmm9jl44vOJvRJ6P7WuBi/s2A97LxxHGIdw==", "cpu": [ "x64" ], @@ -1290,9 +1290,9 @@ ] }, "node_modules/@rollup/rollup-openbsd-x64": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.57.1.tgz", - "integrity": "sha512-H+hXEv9gdVQuDTgnqD+SQffoWoc0Of59AStSzTEj/feWTBAnSfSD3+Dql1ZruJQxmykT/JVY0dE8Ka7z0DH1hw==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.60.0.tgz", + "integrity": "sha512-1o/0/pIhozoSaDJoDcec+IVLbnRtQmHwPV730+AOD29lHEEo4F5BEUB24H0OBdhbBBDwIOSuf7vgg0Ywxdfiiw==", "cpu": [ "x64" ], @@ -1304,9 +1304,9 @@ ] }, "node_modules/@rollup/rollup-openharmony-arm64": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.57.1.tgz", - "integrity": "sha512-4wYoDpNg6o/oPximyc/NG+mYUejZrCU2q+2w6YZqrAs2UcNUChIZXjtafAiiZSUc7On8v5NyNj34Kzj/Ltk6dQ==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.60.0.tgz", + "integrity": "sha512-pESDkos/PDzYwtyzB5p/UoNU/8fJo68vcXM9ZW2V0kjYayj1KaaUfi1NmTUTUpMn4UhU4gTuK8gIaFO4UGuMbA==", "cpu": [ "arm64" ], @@ -1318,9 +1318,9 @@ ] }, "node_modules/@rollup/rollup-win32-arm64-msvc": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.57.1.tgz", - "integrity": "sha512-O54mtsV/6LW3P8qdTcamQmuC990HDfR71lo44oZMZlXU4tzLrbvTii87Ni9opq60ds0YzuAlEr/GNwuNluZyMQ==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.60.0.tgz", + "integrity": "sha512-hj1wFStD7B1YBeYmvY+lWXZ7ey73YGPcViMShYikqKT1GtstIKQAtfUI6yrzPjAy/O7pO0VLXGmUVWXQMaYgTQ==", "cpu": [ "arm64" ], @@ -1332,9 +1332,9 @@ ] }, "node_modules/@rollup/rollup-win32-ia32-msvc": { - "version": "4.57.1", 
- "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.57.1.tgz", - "integrity": "sha512-P3dLS+IerxCT/7D2q2FYcRdWRl22dNbrbBEtxdWhXrfIMPP9lQhb5h4Du04mdl5Woq05jVCDPCMF7Ub0NAjIew==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.60.0.tgz", + "integrity": "sha512-SyaIPFoxmUPlNDq5EHkTbiKzmSEmq/gOYFI/3HHJ8iS/v1mbugVa7dXUzcJGQfoytp9DJFLhHH4U3/eTy2Bq4w==", "cpu": [ "ia32" ], @@ -1346,9 +1346,9 @@ ] }, "node_modules/@rollup/rollup-win32-x64-gnu": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.57.1.tgz", - "integrity": "sha512-VMBH2eOOaKGtIJYleXsi2B8CPVADrh+TyNxJ4mWPnKfLB/DBUmzW+5m1xUrcwWoMfSLagIRpjUFeW5CO5hyciQ==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.60.0.tgz", + "integrity": "sha512-RdcryEfzZr+lAr5kRm2ucN9aVlCCa2QNq4hXelZxb8GG0NJSazq44Z3PCCc8wISRuCVnGs0lQJVX5Vp6fKA+IA==", "cpu": [ "x64" ], @@ -1360,9 +1360,9 @@ ] }, "node_modules/@rollup/rollup-win32-x64-msvc": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.57.1.tgz", - "integrity": "sha512-mxRFDdHIWRxg3UfIIAwCm6NzvxG0jDX/wBN6KsQFTvKFqqg9vTrWUE68qEjHt19A5wwx5X5aUi2zuZT7YR0jrA==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.60.0.tgz", + "integrity": "sha512-PrsWNQ8BuE00O3Xsx3ALh2Df8fAj9+cvvX9AIA6o4KpATR98c9mud4XtDWVvsEuyia5U4tVSTKygawyJkjm60w==", "cpu": [ "x64" ], @@ -1926,9 +1926,9 @@ } }, "node_modules/@typescript-eslint/typescript-estree/node_modules/brace-expansion": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", - "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + 
"version": "2.0.3", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.3.tgz", + "integrity": "sha512-MCV/fYJEbqx68aE58kv2cA/kiky1G8vux3OR6/jbS+jIMe/6fJWa0DTzJU7dqijOWYwHi1t29FlfYI9uytqlpA==", "dev": true, "license": "MIT", "dependencies": { @@ -1936,13 +1936,13 @@ } }, "node_modules/@typescript-eslint/typescript-estree/node_modules/minimatch": { - "version": "9.0.5", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", - "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "version": "9.0.9", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.9.tgz", + "integrity": "sha512-OBwBN9AL4dqmETlpS2zasx+vTeWclWzkblfZk7KTA5j3jeOONz/tRCnZomUyvNg83wL5Zv9Ss6HMJXAgL8R2Yg==", "dev": true, "license": "ISC", "dependencies": { - "brace-expansion": "^2.0.1" + "brace-expansion": "^2.0.2" }, "engines": { "node": ">=16 || 14 >=14.17" @@ -2052,9 +2052,9 @@ } }, "node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "version": "6.14.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.14.0.tgz", + "integrity": "sha512-IWrosm/yrn43eiKqkfkHis7QioDleaXQHdDVPKg0FSwwd/DuvyX79TZnFOnYpB7dcsFAMmtFztZuXPDvSePkFw==", "dev": true, "license": "MIT", "dependencies": { @@ -2109,9 +2109,9 @@ } }, "node_modules/brace-expansion": { - "version": "1.1.12", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", - "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "version": "1.1.13", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.13.tgz", + "integrity": "sha512-9ZLprWS6EENmhEOpjCYW2c8VkmOvckIJZfkr7rBW6dObmfgJ/L1GpSYW5Hpo9lDz4D1+n0Ckz8rU7FwHDQiG/w==", "dev": true, "license": 
"MIT", "dependencies": { @@ -2657,9 +2657,9 @@ } }, "node_modules/flatted": { - "version": "3.3.3", - "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz", - "integrity": "sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==", + "version": "3.4.2", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.4.2.tgz", + "integrity": "sha512-PjDse7RzhcPkIJwy5t7KPWQSZ9cAbzQXcafsetQoD7sOJRQlGikNbx7yZp2OotDnJyrDcbyRq3Ttb18iYOqkxA==", "dev": true, "license": "ISC" }, @@ -3243,9 +3243,9 @@ } }, "node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "version": "3.1.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.5.tgz", + "integrity": "sha512-VgjWUsnnT6n+NUk6eZq77zeFdpW2LWDzP6zFGrCbHXiYNul5Dzqk2HHQ5uFH2DNW5Xbp8+jVzaeNt94ssEEl4w==", "dev": true, "license": "ISC", "dependencies": { @@ -3386,9 +3386,9 @@ "license": "ISC" }, "node_modules/picomatch": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", - "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.4.tgz", + "integrity": "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A==", "dev": true, "license": "MIT", "peer": true, @@ -3501,9 +3501,9 @@ } }, "node_modules/rollup": { - "version": "4.57.1", - "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.57.1.tgz", - "integrity": "sha512-oQL6lgK3e2QZeQ7gcgIkS2YZPg5slw37hYufJ3edKlfQSGGm8ICoxswK15ntSzF/a8+h7ekRy7k7oWc3BQ7y8A==", + "version": "4.60.0", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.60.0.tgz", + "integrity": 
"sha512-yqjxruMGBQJ2gG4HtjZtAfXArHomazDHoFwFFmZZl0r7Pdo7qCIXKqKHZc8yeoMgzJJ+pO6pEEHa+V7uzWlrAQ==", "dev": true, "license": "MIT", "dependencies": { @@ -3517,31 +3517,31 @@ "npm": ">=8.0.0" }, "optionalDependencies": { - "@rollup/rollup-android-arm-eabi": "4.57.1", - "@rollup/rollup-android-arm64": "4.57.1", - "@rollup/rollup-darwin-arm64": "4.57.1", - "@rollup/rollup-darwin-x64": "4.57.1", - "@rollup/rollup-freebsd-arm64": "4.57.1", - "@rollup/rollup-freebsd-x64": "4.57.1", - "@rollup/rollup-linux-arm-gnueabihf": "4.57.1", - "@rollup/rollup-linux-arm-musleabihf": "4.57.1", - "@rollup/rollup-linux-arm64-gnu": "4.57.1", - "@rollup/rollup-linux-arm64-musl": "4.57.1", - "@rollup/rollup-linux-loong64-gnu": "4.57.1", - "@rollup/rollup-linux-loong64-musl": "4.57.1", - "@rollup/rollup-linux-ppc64-gnu": "4.57.1", - "@rollup/rollup-linux-ppc64-musl": "4.57.1", - "@rollup/rollup-linux-riscv64-gnu": "4.57.1", - "@rollup/rollup-linux-riscv64-musl": "4.57.1", - "@rollup/rollup-linux-s390x-gnu": "4.57.1", - "@rollup/rollup-linux-x64-gnu": "4.57.1", - "@rollup/rollup-linux-x64-musl": "4.57.1", - "@rollup/rollup-openbsd-x64": "4.57.1", - "@rollup/rollup-openharmony-arm64": "4.57.1", - "@rollup/rollup-win32-arm64-msvc": "4.57.1", - "@rollup/rollup-win32-ia32-msvc": "4.57.1", - "@rollup/rollup-win32-x64-gnu": "4.57.1", - "@rollup/rollup-win32-x64-msvc": "4.57.1", + "@rollup/rollup-android-arm-eabi": "4.60.0", + "@rollup/rollup-android-arm64": "4.60.0", + "@rollup/rollup-darwin-arm64": "4.60.0", + "@rollup/rollup-darwin-x64": "4.60.0", + "@rollup/rollup-freebsd-arm64": "4.60.0", + "@rollup/rollup-freebsd-x64": "4.60.0", + "@rollup/rollup-linux-arm-gnueabihf": "4.60.0", + "@rollup/rollup-linux-arm-musleabihf": "4.60.0", + "@rollup/rollup-linux-arm64-gnu": "4.60.0", + "@rollup/rollup-linux-arm64-musl": "4.60.0", + "@rollup/rollup-linux-loong64-gnu": "4.60.0", + "@rollup/rollup-linux-loong64-musl": "4.60.0", + "@rollup/rollup-linux-ppc64-gnu": "4.60.0", + 
"@rollup/rollup-linux-ppc64-musl": "4.60.0", + "@rollup/rollup-linux-riscv64-gnu": "4.60.0", + "@rollup/rollup-linux-riscv64-musl": "4.60.0", + "@rollup/rollup-linux-s390x-gnu": "4.60.0", + "@rollup/rollup-linux-x64-gnu": "4.60.0", + "@rollup/rollup-linux-x64-musl": "4.60.0", + "@rollup/rollup-openbsd-x64": "4.60.0", + "@rollup/rollup-openharmony-arm64": "4.60.0", + "@rollup/rollup-win32-arm64-msvc": "4.60.0", + "@rollup/rollup-win32-ia32-msvc": "4.60.0", + "@rollup/rollup-win32-x64-gnu": "4.60.0", + "@rollup/rollup-win32-x64-msvc": "4.60.0", "fsevents": "~2.3.2" } }, @@ -3803,9 +3803,9 @@ } }, "node_modules/vite": { - "version": "7.3.1", - "resolved": "https://registry.npmjs.org/vite/-/vite-7.3.1.tgz", - "integrity": "sha512-w+N7Hifpc3gRjZ63vYBXA56dvvRlNWRczTdmCBBa+CotUzAPf5b7YMdMR/8CQoeYE5LX3W4wj6RYTgonm1b9DA==", + "version": "7.3.2", + "resolved": "https://registry.npmjs.org/vite/-/vite-7.3.2.tgz", + "integrity": "sha512-Bby3NOsna2jsjfLVOHKes8sGwgl4TT0E6vvpYgnAYDIF/tie7MRaFthmKuHx1NSXjiTueXH3do80FMQgvEktRg==", "dev": true, "license": "MIT", "peer": true, diff --git a/proxy/web/package.json b/proxy/web/package.json index 97ec1ec0d..9a7c84ed4 100644 --- a/proxy/web/package.json +++ b/proxy/web/package.json @@ -17,7 +17,7 @@ "tailwind-merge": "^2.6.0" }, "devDependencies": { - "@eslint/js": "^9.39.1", + "@eslint/js": "9.39.2", "@tailwindcss/vite": "^4.1.18", "@types/node": "^24.10.1", "@types/react": "^19.2.5", @@ -31,6 +31,6 @@ "tsx": "^4.21.0", "typescript": "~5.9.3", "typescript-eslint": "^8.46.4", - "vite": "^7.2.4" + "vite": "7.3.2" } } diff --git a/relay/server/handshake.go b/relay/server/handshake.go index 8c3ee1899..067888406 100644 --- a/relay/server/handshake.go +++ b/relay/server/handshake.go @@ -1,11 +1,13 @@ package server import ( + "context" "fmt" - "net" + "time" log "github.com/sirupsen/logrus" + "github.com/netbirdio/netbird/relay/server/listener" "github.com/netbirdio/netbird/shared/relay/messages" //nolint:staticcheck 
"github.com/netbirdio/netbird/shared/relay/messages/address" @@ -13,6 +15,12 @@ import ( authmsg "github.com/netbirdio/netbird/shared/relay/messages/auth" ) +const ( + // handshakeTimeout bounds how long a connection may remain in the + // pre-authentication handshake phase before being closed. + handshakeTimeout = 10 * time.Second +) + type Validator interface { Validate(any) error // Deprecated: Use Validate instead. @@ -58,7 +66,7 @@ func marshalResponseHelloMsg(instanceURL string) ([]byte, error) { } type handshake struct { - conn net.Conn + conn listener.Conn validator Validator preparedMsg *preparedMsg @@ -66,9 +74,9 @@ type handshake struct { peerID *messages.PeerID } -func (h *handshake) handshakeReceive() (*messages.PeerID, error) { +func (h *handshake) handshakeReceive(ctx context.Context) (*messages.PeerID, error) { buf := make([]byte, messages.MaxHandshakeSize) - n, err := h.conn.Read(buf) + n, err := h.conn.Read(ctx, buf) if err != nil { return nil, fmt.Errorf("read from %s: %w", h.conn.RemoteAddr(), err) } @@ -103,7 +111,7 @@ func (h *handshake) handshakeReceive() (*messages.PeerID, error) { return peerID, nil } -func (h *handshake) handshakeResponse() error { +func (h *handshake) handshakeResponse(ctx context.Context) error { var responseMsg []byte if h.handshakeMethodAuth { responseMsg = h.preparedMsg.responseAuthMsg @@ -111,7 +119,7 @@ func (h *handshake) handshakeResponse() error { responseMsg = h.preparedMsg.responseHelloMsg } - if _, err := h.conn.Write(responseMsg); err != nil { + if _, err := h.conn.Write(ctx, responseMsg); err != nil { return fmt.Errorf("handshake response write to %s (%s): %w", h.peerID, h.conn.RemoteAddr(), err) } diff --git a/relay/server/listener/conn.go b/relay/server/listener/conn.go new file mode 100644 index 000000000..ef0869594 --- /dev/null +++ b/relay/server/listener/conn.go @@ -0,0 +1,14 @@ +package listener + +import ( + "context" + "net" +) + +// Conn is the relay connection contract implemented by WS and QUIC 
transports. +type Conn interface { + Read(ctx context.Context, b []byte) (n int, err error) + Write(ctx context.Context, b []byte) (n int, err error) + RemoteAddr() net.Addr + Close() error +} diff --git a/relay/server/listener/listener.go b/relay/server/listener/listener.go deleted file mode 100644 index 0a79182f4..000000000 --- a/relay/server/listener/listener.go +++ /dev/null @@ -1,14 +0,0 @@ -package listener - -import ( - "context" - "net" - - "github.com/netbirdio/netbird/relay/protocol" -) - -type Listener interface { - Listen(func(conn net.Conn)) error - Shutdown(ctx context.Context) error - Protocol() protocol.Protocol -} diff --git a/relay/server/listener/quic/conn.go b/relay/server/listener/quic/conn.go index 6e2201bf7..d8dafcd1f 100644 --- a/relay/server/listener/quic/conn.go +++ b/relay/server/listener/quic/conn.go @@ -3,33 +3,26 @@ package quic import ( "context" "errors" - "fmt" "net" "sync" - "time" "github.com/quic-go/quic-go" ) type Conn struct { - session *quic.Conn - closed bool - closedMu sync.Mutex - ctx context.Context - ctxCancel context.CancelFunc + session *quic.Conn + closed bool + closedMu sync.Mutex } func NewConn(session *quic.Conn) *Conn { - ctx, cancel := context.WithCancel(context.Background()) return &Conn{ - session: session, - ctx: ctx, - ctxCancel: cancel, + session: session, } } -func (c *Conn) Read(b []byte) (n int, err error) { - dgram, err := c.session.ReceiveDatagram(c.ctx) +func (c *Conn) Read(ctx context.Context, b []byte) (n int, err error) { + dgram, err := c.session.ReceiveDatagram(ctx) if err != nil { return 0, c.remoteCloseErrHandling(err) } @@ -38,33 +31,17 @@ func (c *Conn) Read(b []byte) (n int, err error) { return n, nil } -func (c *Conn) Write(b []byte) (int, error) { +func (c *Conn) Write(_ context.Context, b []byte) (int, error) { if err := c.session.SendDatagram(b); err != nil { return 0, c.remoteCloseErrHandling(err) } return len(b), nil } -func (c *Conn) LocalAddr() net.Addr { - return c.session.LocalAddr() 
-} - func (c *Conn) RemoteAddr() net.Addr { return c.session.RemoteAddr() } -func (c *Conn) SetReadDeadline(t time.Time) error { - return nil -} - -func (c *Conn) SetWriteDeadline(t time.Time) error { - return fmt.Errorf("SetWriteDeadline is not implemented") -} - -func (c *Conn) SetDeadline(t time.Time) error { - return fmt.Errorf("SetDeadline is not implemented") -} - func (c *Conn) Close() error { c.closedMu.Lock() if c.closed { @@ -74,8 +51,6 @@ func (c *Conn) Close() error { c.closed = true c.closedMu.Unlock() - c.ctxCancel() // Cancel the context - sessionErr := c.session.CloseWithError(0, "normal closure") return sessionErr } diff --git a/relay/server/listener/quic/listener.go b/relay/server/listener/quic/listener.go index 797223e74..68f0e03c0 100644 --- a/relay/server/listener/quic/listener.go +++ b/relay/server/listener/quic/listener.go @@ -5,12 +5,12 @@ import ( "crypto/tls" "errors" "fmt" - "net" "github.com/quic-go/quic-go" log "github.com/sirupsen/logrus" "github.com/netbirdio/netbird/relay/protocol" + relaylistener "github.com/netbirdio/netbird/relay/server/listener" nbRelay "github.com/netbirdio/netbird/shared/relay" ) @@ -25,7 +25,7 @@ type Listener struct { listener *quic.Listener } -func (l *Listener) Listen(acceptFn func(conn net.Conn)) error { +func (l *Listener) Listen(acceptFn func(conn relaylistener.Conn)) error { quicCfg := &quic.Config{ EnableDatagrams: true, InitialPacketSize: nbRelay.QUICInitialPacketSize, diff --git a/relay/server/listener/ws/conn.go b/relay/server/listener/ws/conn.go index d5bce56f7..c22b5719d 100644 --- a/relay/server/listener/ws/conn.go +++ b/relay/server/listener/ws/conn.go @@ -18,25 +18,21 @@ const ( type Conn struct { *websocket.Conn - lAddr *net.TCPAddr rAddr *net.TCPAddr closed bool closedMu sync.Mutex - ctx context.Context } -func NewConn(wsConn *websocket.Conn, lAddr, rAddr *net.TCPAddr) *Conn { +func NewConn(wsConn *websocket.Conn, rAddr *net.TCPAddr) *Conn { return &Conn{ Conn: wsConn, - lAddr: lAddr, rAddr: 
rAddr, - ctx: context.Background(), } } -func (c *Conn) Read(b []byte) (n int, err error) { - t, r, err := c.Reader(c.ctx) +func (c *Conn) Read(ctx context.Context, b []byte) (n int, err error) { + t, r, err := c.Reader(ctx) if err != nil { return 0, c.ioErrHandling(err) } @@ -56,34 +52,18 @@ func (c *Conn) Read(b []byte) (n int, err error) { // Write writes a binary message with the given payload. // It does not block until fill the internal buffer. // If the buffer filled up, wait until the buffer is drained or timeout. -func (c *Conn) Write(b []byte) (int, error) { - ctx, ctxCancel := context.WithTimeout(c.ctx, writeTimeout) +func (c *Conn) Write(ctx context.Context, b []byte) (int, error) { + ctx, ctxCancel := context.WithTimeout(ctx, writeTimeout) defer ctxCancel() err := c.Conn.Write(ctx, websocket.MessageBinary, b) return len(b), err } -func (c *Conn) LocalAddr() net.Addr { - return c.lAddr -} - func (c *Conn) RemoteAddr() net.Addr { return c.rAddr } -func (c *Conn) SetReadDeadline(t time.Time) error { - return fmt.Errorf("SetReadDeadline is not implemented") -} - -func (c *Conn) SetWriteDeadline(t time.Time) error { - return fmt.Errorf("SetWriteDeadline is not implemented") -} - -func (c *Conn) SetDeadline(t time.Time) error { - return fmt.Errorf("SetDeadline is not implemented") -} - func (c *Conn) Close() error { c.closedMu.Lock() c.closed = true diff --git a/relay/server/listener/ws/listener.go b/relay/server/listener/ws/listener.go index 12219e29b..ba175f901 100644 --- a/relay/server/listener/ws/listener.go +++ b/relay/server/listener/ws/listener.go @@ -7,11 +7,13 @@ import ( "fmt" "net" "net/http" + "time" "github.com/coder/websocket" log "github.com/sirupsen/logrus" "github.com/netbirdio/netbird/relay/protocol" + relaylistener "github.com/netbirdio/netbird/relay/server/listener" "github.com/netbirdio/netbird/shared/relay" ) @@ -27,18 +29,19 @@ type Listener struct { TLSConfig *tls.Config server *http.Server - acceptFn func(conn net.Conn) + acceptFn 
func(conn relaylistener.Conn) } -func (l *Listener) Listen(acceptFn func(conn net.Conn)) error { +func (l *Listener) Listen(acceptFn func(conn relaylistener.Conn)) error { l.acceptFn = acceptFn mux := http.NewServeMux() mux.HandleFunc(URLPath, l.onAccept) l.server = &http.Server{ - Addr: l.Address, - Handler: mux, - TLSConfig: l.TLSConfig, + Addr: l.Address, + Handler: mux, + TLSConfig: l.TLSConfig, + ReadHeaderTimeout: 5 * time.Second, } log.Infof("WS server listening address: %s", l.Address) @@ -93,18 +96,9 @@ func (l *Listener) onAccept(w http.ResponseWriter, r *http.Request) { return } - lAddr, err := net.ResolveTCPAddr("tcp", l.server.Addr) - if err != nil { - err = wsConn.Close(websocket.StatusInternalError, "internal error") - if err != nil { - log.Errorf("failed to close ws connection: %s", err) - } - return - } - log.Infof("WS client connected from: %s", rAddr) - conn := NewConn(wsConn, lAddr, rAddr) + conn := NewConn(wsConn, rAddr) l.acceptFn(conn) } diff --git a/relay/server/peer.go b/relay/server/peer.go index c5ff41857..8376cdfa7 100644 --- a/relay/server/peer.go +++ b/relay/server/peer.go @@ -10,6 +10,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/netbirdio/netbird/relay/metrics" + "github.com/netbirdio/netbird/relay/server/listener" "github.com/netbirdio/netbird/relay/server/store" "github.com/netbirdio/netbird/shared/relay/healthcheck" "github.com/netbirdio/netbird/shared/relay/messages" @@ -26,11 +27,14 @@ type Peer struct { metrics *metrics.Metrics log *log.Entry id messages.PeerID - conn net.Conn + conn listener.Conn connMu sync.RWMutex store *store.Store notifier *store.PeerNotifier + ctx context.Context + ctxCancel context.CancelFunc + peersListener *store.Listener // between the online peer collection step and the notification sending should not be sent offline notifications from another thread @@ -38,14 +42,17 @@ type Peer struct { } // NewPeer creates a new Peer instance and prepare custom logging -func NewPeer(metrics 
*metrics.Metrics, id messages.PeerID, conn net.Conn, store *store.Store, notifier *store.PeerNotifier) *Peer { +func NewPeer(metrics *metrics.Metrics, id messages.PeerID, conn listener.Conn, store *store.Store, notifier *store.PeerNotifier) *Peer { + ctx, cancel := context.WithCancel(context.Background()) p := &Peer{ - metrics: metrics, - log: log.WithField("peer_id", id.String()), - id: id, - conn: conn, - store: store, - notifier: notifier, + metrics: metrics, + log: log.WithField("peer_id", id.String()), + id: id, + conn: conn, + store: store, + notifier: notifier, + ctx: ctx, + ctxCancel: cancel, } return p @@ -57,6 +64,7 @@ func NewPeer(metrics *metrics.Metrics, id messages.PeerID, conn net.Conn, store func (p *Peer) Work() { p.peersListener = p.notifier.NewListener(p.sendPeersOnline, p.sendPeersWentOffline) defer func() { + p.ctxCancel() p.notifier.RemoveListener(p.peersListener) if err := p.conn.Close(); err != nil && !errors.Is(err, net.ErrClosed) { @@ -64,8 +72,7 @@ func (p *Peer) Work() { } }() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := p.ctx hc := healthcheck.NewSender(p.log) go hc.StartHealthCheck(ctx) @@ -73,7 +80,7 @@ func (p *Peer) Work() { buf := make([]byte, bufferSize) for { - n, err := p.conn.Read(buf) + n, err := p.conn.Read(ctx, buf) if err != nil { if !errors.Is(err, net.ErrClosed) { p.log.Errorf("failed to read message: %s", err) @@ -131,10 +138,10 @@ func (p *Peer) handleMsgType(ctx context.Context, msgType messages.MsgType, hc * } // Write writes data to the connection -func (p *Peer) Write(b []byte) (int, error) { +func (p *Peer) Write(ctx context.Context, b []byte) (int, error) { p.connMu.RLock() defer p.connMu.RUnlock() - return p.conn.Write(b) + return p.conn.Write(ctx, b) } // CloseGracefully closes the connection with the peer gracefully. 
Send a close message to the client and close the @@ -147,6 +154,7 @@ func (p *Peer) CloseGracefully(ctx context.Context) { p.log.Errorf("failed to send close message to peer: %s", p.String()) } + p.ctxCancel() if err := p.conn.Close(); err != nil { p.log.Errorf(errCloseConn, err) } @@ -156,6 +164,7 @@ func (p *Peer) Close() { p.connMu.Lock() defer p.connMu.Unlock() + p.ctxCancel() if err := p.conn.Close(); err != nil { p.log.Errorf(errCloseConn, err) } @@ -170,26 +179,15 @@ func (p *Peer) writeWithTimeout(ctx context.Context, buf []byte) error { ctx, cancel := context.WithTimeout(ctx, 3*time.Second) defer cancel() - writeDone := make(chan struct{}) - var err error - go func() { - _, err = p.conn.Write(buf) - close(writeDone) - }() - - select { - case <-ctx.Done(): - return ctx.Err() - case <-writeDone: - return err - } + _, err := p.conn.Write(ctx, buf) + return err } func (p *Peer) handleHealthcheckEvents(ctx context.Context, hc *healthcheck.Sender) { for { select { case <-hc.HealthCheck: - _, err := p.Write(messages.MarshalHealthcheck()) + _, err := p.Write(ctx, messages.MarshalHealthcheck()) if err != nil { p.log.Errorf("failed to send healthcheck message: %s", err) return @@ -228,12 +226,12 @@ func (p *Peer) handleTransportMsg(msg []byte) { return } - n, err := dp.Write(msg) + n, err := dp.Write(dp.ctx, msg) if err != nil { p.log.Errorf("failed to write transport message to: %s", dp.String()) return } - p.metrics.TransferBytesSent.Add(context.Background(), int64(n)) + p.metrics.TransferBytesSent.Add(p.ctx, int64(n)) } func (p *Peer) handleSubscribePeerState(msg []byte) { @@ -276,7 +274,7 @@ func (p *Peer) sendPeersOnline(peers []messages.PeerID) { } for n, msg := range msgs { - if _, err := p.Write(msg); err != nil { + if _, err := p.Write(p.ctx, msg); err != nil { p.log.Errorf("failed to write %d. 
peers offline message: %s", n, err) } } @@ -293,7 +291,7 @@ func (p *Peer) sendPeersWentOffline(peers []messages.PeerID) { } for n, msg := range msgs { - if _, err := p.Write(msg); err != nil { + if _, err := p.Write(p.ctx, msg); err != nil { p.log.Errorf("failed to write %d. peers offline message: %s", n, err) } } diff --git a/relay/server/relay.go b/relay/server/relay.go index bb355f58f..56add8bea 100644 --- a/relay/server/relay.go +++ b/relay/server/relay.go @@ -3,7 +3,6 @@ package server import ( "context" "fmt" - "net" "net/url" "sync" "time" @@ -13,11 +12,20 @@ import ( "go.opentelemetry.io/otel/metric" "github.com/netbirdio/netbird/relay/healthcheck/peerid" + "github.com/netbirdio/netbird/relay/protocol" + "github.com/netbirdio/netbird/relay/server/listener" + //nolint:staticcheck "github.com/netbirdio/netbird/relay/metrics" "github.com/netbirdio/netbird/relay/server/store" ) +type Listener interface { + Listen(func(conn listener.Conn)) error + Shutdown(ctx context.Context) error + Protocol() protocol.Protocol +} + type Config struct { Meter metric.Meter ExposedAddress string @@ -109,7 +117,7 @@ func NewRelay(config Config) (*Relay, error) { } // Accept start to handle a new peer connection -func (r *Relay) Accept(conn net.Conn) { +func (r *Relay) Accept(conn listener.Conn) { acceptTime := time.Now() r.closeMu.RLock() defer r.closeMu.RUnlock() @@ -117,12 +125,15 @@ func (r *Relay) Accept(conn net.Conn) { return } + hsCtx, hsCancel := context.WithTimeout(context.Background(), handshakeTimeout) + defer hsCancel() + h := handshake{ conn: conn, validator: r.validator, preparedMsg: r.preparedMsg, } - peerID, err := h.handshakeReceive() + peerID, err := h.handshakeReceive(hsCtx) if err != nil { if peerid.IsHealthCheck(peerID) { log.Debugf("health check connection from %s", conn.RemoteAddr()) @@ -154,7 +165,7 @@ func (r *Relay) Accept(conn net.Conn) { r.metrics.PeerDisconnected(peer.String()) }() - if err := h.handshakeResponse(); err != nil { + if err := 
h.handshakeResponse(hsCtx); err != nil { log.Errorf("failed to send handshake response, close peer: %s", err) peer.Close() } diff --git a/relay/server/server.go b/relay/server/server.go index a0f7eb73c..340da55b8 100644 --- a/relay/server/server.go +++ b/relay/server/server.go @@ -3,7 +3,6 @@ package server import ( "context" "crypto/tls" - "net" "net/url" "sync" @@ -31,7 +30,7 @@ type ListenerConfig struct { // In a new HTTP connection, the server will accept the connection and pass it to the Relay server via the Accept method. type Server struct { relay *Relay - listeners []listener.Listener + listeners []Listener listenerMux sync.Mutex } @@ -56,7 +55,7 @@ func NewServer(config Config) (*Server, error) { } return &Server{ relay: relay, - listeners: make([]listener.Listener, 0, 2), + listeners: make([]Listener, 0, 2), }, nil } @@ -86,7 +85,7 @@ func (r *Server) Listen(cfg ListenerConfig) error { wg := sync.WaitGroup{} for _, l := range r.listeners { wg.Add(1) - go func(listener listener.Listener) { + go func(listener Listener) { defer wg.Done() errChan <- listener.Listen(r.relay.Accept) }(l) @@ -139,6 +138,6 @@ func (r *Server) InstanceURL() url.URL { // RelayAccept returns the relay's Accept function for handling incoming connections. // This allows external HTTP handlers to route connections to the relay without // starting the relay's own listeners. 
-func (r *Server) RelayAccept() func(conn net.Conn) { +func (r *Server) RelayAccept() func(conn listener.Conn) { return r.relay.Accept } diff --git a/release_files/install.sh b/release_files/install.sh index 6a2c5f458..1e71936f3 100755 --- a/release_files/install.sh +++ b/release_files/install.sh @@ -128,7 +128,7 @@ cat <<-EOF | ${SUDO} tee /etc/yum.repos.d/netbird.repo name=NetBird baseurl=https://pkgs.netbird.io/yum/ enabled=1 -gpgcheck=0 +gpgcheck=1 gpgkey=https://pkgs.netbird.io/yum/repodata/repomd.xml.key repo_gpgcheck=1 EOF diff --git a/shared/auth/jwt/validator.go b/shared/auth/jwt/validator.go index aeaa5842c..cf18b2cf6 100644 --- a/shared/auth/jwt/validator.go +++ b/shared/auth/jwt/validator.go @@ -25,7 +25,7 @@ import ( // Jwks is a collection of JSONWebKey obtained from Config.HttpServerConfig.AuthKeysLocation type Jwks struct { Keys []JSONWebKey `json:"keys"` - expiresInTime time.Time + ExpiresInTime time.Time `json:"-"` } // The supported elliptic curves types @@ -53,12 +53,17 @@ type JSONWebKey struct { X5c []string `json:"x5c"` } +// KeyFetcher is a function that retrieves JWKS keys directly (e.g., from Dex storage) +// bypassing HTTP. When set on a Validator, it is used instead of the HTTP-based getPemKeys. +type KeyFetcher func(ctx context.Context) (*Jwks, error) + type Validator struct { lock sync.Mutex issuer string audienceList []string keysLocation string idpSignkeyRefreshEnabled bool + keyFetcher KeyFetcher keys *Jwks lastForcedRefresh time.Time } @@ -85,10 +90,39 @@ func NewValidator(issuer string, audienceList []string, keysLocation string, idp } } +// NewValidatorWithKeyFetcher creates a Validator that fetches keys directly using the +// provided KeyFetcher (e.g., from Dex storage) instead of via HTTP. 
+func NewValidatorWithKeyFetcher(issuer string, audienceList []string, keyFetcher KeyFetcher) *Validator { + ctx := context.Background() + keys, err := keyFetcher(ctx) + if err != nil { + log.Warnf("could not get keys from key fetcher: %s, it will try again on the next http request", err) + } + if keys == nil { + keys = &Jwks{} + } + + return &Validator{ + keys: keys, + issuer: issuer, + audienceList: audienceList, + idpSignkeyRefreshEnabled: true, + keyFetcher: keyFetcher, + } +} + // forcedRefreshCooldown is the minimum time between forced key refreshes // to prevent abuse from invalid tokens with fake kid values const forcedRefreshCooldown = 30 * time.Second +// fetchKeys retrieves keys using the keyFetcher if available, otherwise falls back to HTTP. +func (v *Validator) fetchKeys(ctx context.Context) (*Jwks, error) { + if v.keyFetcher != nil { + return v.keyFetcher(ctx) + } + return getPemKeys(v.keysLocation) +} + func (v *Validator) getKeyFunc(ctx context.Context) jwt.Keyfunc { return func(token *jwt.Token) (interface{}, error) { // If keys are rotated, verify the keys prior to token validation @@ -131,13 +165,13 @@ func (v *Validator) refreshKeys(ctx context.Context) { v.lock.Lock() defer v.lock.Unlock() - refreshedKeys, err := getPemKeys(v.keysLocation) + refreshedKeys, err := v.fetchKeys(ctx) if err != nil { log.WithContext(ctx).Debugf("cannot get JSONWebKey: %v, falling back to old keys", err) return } - log.WithContext(ctx).Debugf("keys refreshed, new UTC expiration time: %s", refreshedKeys.expiresInTime.UTC()) + log.WithContext(ctx).Debugf("keys refreshed, new UTC expiration time: %s", refreshedKeys.ExpiresInTime.UTC()) v.keys = refreshedKeys } @@ -155,13 +189,13 @@ func (v *Validator) forceRefreshKeys(ctx context.Context) bool { log.WithContext(ctx).Debugf("key not found in cache, forcing JWKS refresh") - refreshedKeys, err := getPemKeys(v.keysLocation) + refreshedKeys, err := v.fetchKeys(ctx) if err != nil { log.WithContext(ctx).Debugf("cannot get 
JSONWebKey: %v, falling back to old keys", err) return false } - log.WithContext(ctx).Debugf("keys refreshed, new UTC expiration time: %s", refreshedKeys.expiresInTime.UTC()) + log.WithContext(ctx).Debugf("keys refreshed, new UTC expiration time: %s", refreshedKeys.ExpiresInTime.UTC()) v.keys = refreshedKeys v.lastForcedRefresh = time.Now() return true @@ -203,7 +237,7 @@ func (v *Validator) ValidateAndParse(ctx context.Context, token string) (*jwt.To // stillValid returns true if the JSONWebKey still valid and have enough time to be used func (jwks *Jwks) stillValid() bool { - return !jwks.expiresInTime.IsZero() && time.Now().Add(5*time.Second).Before(jwks.expiresInTime) + return !jwks.ExpiresInTime.IsZero() && time.Now().Add(5*time.Second).Before(jwks.ExpiresInTime) } func getPemKeys(keysLocation string) (*Jwks, error) { @@ -227,7 +261,7 @@ func getPemKeys(keysLocation string) (*Jwks, error) { cacheControlHeader := resp.Header.Get("Cache-Control") expiresIn := getMaxAgeFromCacheHeader(cacheControlHeader) - jwks.expiresInTime = time.Now().Add(time.Duration(expiresIn) * time.Second) + jwks.ExpiresInTime = time.Now().Add(time.Duration(expiresIn) * time.Second) return jwks, nil } diff --git a/shared/management/client/client.go b/shared/management/client/client.go index a15301223..18efba87b 100644 --- a/shared/management/client/client.go +++ b/shared/management/client/client.go @@ -4,8 +4,6 @@ import ( "context" "io" - "golang.zx2c4.com/wireguard/wgctrl/wgtypes" - "github.com/netbirdio/netbird/client/system" "github.com/netbirdio/netbird/shared/management/domain" "github.com/netbirdio/netbird/shared/management/proto" @@ -16,14 +14,18 @@ type Client interface { io.Closer Sync(ctx context.Context, sysInfo *system.Info, msgHandler func(msg *proto.SyncResponse) error) error Job(ctx context.Context, msgHandler func(msg *proto.JobRequest) *proto.JobResponse) error - GetServerPublicKey() (*wgtypes.Key, error) - Register(serverKey wgtypes.Key, setupKey string, jwtToken 
string, sysInfo *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) - Login(serverKey wgtypes.Key, sysInfo *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) - GetDeviceAuthorizationFlow(serverKey wgtypes.Key) (*proto.DeviceAuthorizationFlow, error) - GetPKCEAuthorizationFlow(serverKey wgtypes.Key) (*proto.PKCEAuthorizationFlow, error) + Register(setupKey string, jwtToken string, sysInfo *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) + Login(sysInfo *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) + GetDeviceAuthorizationFlow() (*proto.DeviceAuthorizationFlow, error) + GetPKCEAuthorizationFlow() (*proto.PKCEAuthorizationFlow, error) GetNetworkMap(sysInfo *system.Info) (*proto.NetworkMap, error) GetServerURL() string + // IsHealthy returns the current connection status without blocking. + // Used by the engine to monitor connectivity in the background. IsHealthy() bool + // HealthCheck actively probes the management server and returns an error if unreachable. + // Used to validate connectivity before committing configuration changes. 
+ HealthCheck() error SyncMeta(sysInfo *system.Info) error Logout() error CreateExpose(ctx context.Context, req ExposeRequest) (*ExposeResponse, error) diff --git a/shared/management/client/client_test.go b/shared/management/client/client_test.go index a11f863a7..f5edb6b95 100644 --- a/shared/management/client/client_test.go +++ b/shared/management/client/client_test.go @@ -189,7 +189,7 @@ func closeManagementSilently(s *grpc.Server, listener net.Listener) { } } -func TestClient_GetServerPublicKey(t *testing.T) { +func TestClient_HealthCheck(t *testing.T) { testKey, err := wgtypes.GenerateKey() if err != nil { t.Fatal(err) @@ -203,12 +203,8 @@ func TestClient_GetServerPublicKey(t *testing.T) { t.Fatal(err) } - key, err := client.GetServerPublicKey() - if err != nil { - t.Error("couldn't retrieve management public key") - } - if key == nil { - t.Error("got an empty management public key") + if err := client.HealthCheck(); err != nil { + t.Errorf("health check failed: %v", err) } } @@ -225,12 +221,8 @@ func TestClient_LoginUnregistered_ShouldThrow_401(t *testing.T) { if err != nil { t.Fatal(err) } - key, err := client.GetServerPublicKey() - if err != nil { - t.Fatal(err) - } sysInfo := system.GetInfo(context.TODO()) - _, err = client.Login(*key, sysInfo, nil, nil) + _, err = client.Login(sysInfo, nil, nil) if err == nil { t.Error("expecting err on unregistered login, got nil") } @@ -253,12 +245,8 @@ func TestClient_LoginRegistered(t *testing.T) { t.Fatal(err) } - key, err := client.GetServerPublicKey() - if err != nil { - t.Error(err) - } info := system.GetInfo(context.TODO()) - resp, err := client.Register(*key, ValidKey, "", info, nil, nil) + resp, err := client.Register(ValidKey, "", info, nil, nil) if err != nil { t.Error(err) } @@ -282,13 +270,8 @@ func TestClient_Sync(t *testing.T) { t.Fatal(err) } - serverKey, err := client.GetServerPublicKey() - if err != nil { - t.Error(err) - } - info := system.GetInfo(context.TODO()) - _, err = client.Register(*serverKey, 
ValidKey, "", info, nil, nil) + _, err = client.Register(ValidKey, "", info, nil, nil) if err != nil { t.Error(err) } @@ -304,7 +287,7 @@ func TestClient_Sync(t *testing.T) { } info = system.GetInfo(context.TODO()) - _, err = remoteClient.Register(*serverKey, ValidKey, "", info, nil, nil) + _, err = remoteClient.Register(ValidKey, "", info, nil, nil) if err != nil { t.Fatal(err) } @@ -364,11 +347,6 @@ func Test_SystemMetaDataFromClient(t *testing.T) { t.Fatalf("error while creating testClient: %v", err) } - key, err := testClient.GetServerPublicKey() - if err != nil { - t.Fatalf("error while getting server public key from testclient, %v", err) - } - var actualMeta *mgmtProto.PeerSystemMeta var actualValidKey string var wg sync.WaitGroup @@ -405,7 +383,7 @@ func Test_SystemMetaDataFromClient(t *testing.T) { } info := system.GetInfo(context.TODO()) - _, err = testClient.Register(*key, ValidKey, "", info, nil, nil) + _, err = testClient.Register(ValidKey, "", info, nil, nil) if err != nil { t.Errorf("error while trying to register client: %v", err) } @@ -505,7 +483,7 @@ func Test_GetDeviceAuthorizationFlow(t *testing.T) { } mgmtMockServer.GetDeviceAuthorizationFlowFunc = func(ctx context.Context, req *mgmtProto.EncryptedMessage) (*mgmtProto.EncryptedMessage, error) { - encryptedResp, err := encryption.EncryptMessage(serverKey, client.key, expectedFlowInfo) + encryptedResp, err := encryption.EncryptMessage(client.key.PublicKey(), serverKey, expectedFlowInfo) if err != nil { return nil, err } @@ -517,7 +495,7 @@ func Test_GetDeviceAuthorizationFlow(t *testing.T) { }, nil } - flowInfo, err := client.GetDeviceAuthorizationFlow(serverKey) + flowInfo, err := client.GetDeviceAuthorizationFlow() if err != nil { t.Error("error while retrieving device auth flow information") } @@ -551,7 +529,7 @@ func Test_GetPKCEAuthorizationFlow(t *testing.T) { } mgmtMockServer.GetPKCEAuthorizationFlowFunc = func(ctx context.Context, req *mgmtProto.EncryptedMessage) 
(*mgmtProto.EncryptedMessage, error) { - encryptedResp, err := encryption.EncryptMessage(serverKey, client.key, expectedFlowInfo) + encryptedResp, err := encryption.EncryptMessage(client.key.PublicKey(), serverKey, expectedFlowInfo) if err != nil { return nil, err } @@ -563,11 +541,11 @@ func Test_GetPKCEAuthorizationFlow(t *testing.T) { }, nil } - flowInfo, err := client.GetPKCEAuthorizationFlow(serverKey) + flowInfo, err := client.GetPKCEAuthorizationFlow() if err != nil { t.Error("error while retrieving pkce auth flow information") } assert.Equal(t, expectedFlowInfo.ProviderConfig.ClientID, flowInfo.ProviderConfig.ClientID, "provider configured client ID should match") - assert.Equal(t, expectedFlowInfo.ProviderConfig.ClientSecret, flowInfo.ProviderConfig.ClientSecret, "provider configured client secret should match") + assert.Equal(t, expectedFlowInfo.ProviderConfig.ClientSecret, flowInfo.ProviderConfig.ClientSecret, "provider configured client secret should match") //nolint:staticcheck } diff --git a/shared/management/client/grpc.go b/shared/management/client/grpc.go index 252199498..a01e51abc 100644 --- a/shared/management/client/grpc.go +++ b/shared/management/client/grpc.go @@ -202,7 +202,7 @@ func (c *GrpcClient) withMgmtStream( return fmt.Errorf("connection to management is not ready and in %s state", connState) } - serverPubKey, err := c.GetServerPublicKey() + serverPubKey, err := c.getServerPublicKey() if err != nil { log.Debugf(errMsgMgmtPublicKey, err) return err @@ -404,7 +404,7 @@ func (c *GrpcClient) handleSyncStream(ctx context.Context, serverPubKey wgtypes. 
// GetNetworkMap return with the network map func (c *GrpcClient) GetNetworkMap(sysInfo *system.Info) (*proto.NetworkMap, error) { - serverPubKey, err := c.GetServerPublicKey() + serverPubKey, err := c.getServerPublicKey() if err != nil { log.Debugf("failed getting Management Service public key: %s", err) return nil, err @@ -490,18 +490,24 @@ func (c *GrpcClient) receiveUpdatesEvents(stream proto.ManagementService_SyncCli } } -// GetServerPublicKey returns server's WireGuard public key (used later for encrypting messages sent to the server) -func (c *GrpcClient) GetServerPublicKey() (*wgtypes.Key, error) { +// HealthCheck actively probes the management server and returns an error if unreachable. +// Used to validate connectivity before committing configuration changes. +func (c *GrpcClient) HealthCheck() error { if !c.ready() { - return nil, errors.New(errMsgNoMgmtConnection) + return errors.New(errMsgNoMgmtConnection) } + _, err := c.getServerPublicKey() + return err +} + +// getServerPublicKey fetches the server's WireGuard public key. +func (c *GrpcClient) getServerPublicKey() (*wgtypes.Key, error) { mgmCtx, cancel := context.WithTimeout(c.ctx, 5*time.Second) defer cancel() resp, err := c.realClient.GetServerKey(mgmCtx, &proto.Empty{}) if err != nil { - log.Errorf("failed while getting Management Service public key: %v", err) - return nil, fmt.Errorf("failed while getting Management Service public key") + return nil, fmt.Errorf("failed getting Management Service public key: %w", err) } serverKey, err := wgtypes.ParseKey(resp.Key) @@ -512,7 +518,8 @@ func (c *GrpcClient) GetServerPublicKey() (*wgtypes.Key, error) { return &serverKey, nil } -// IsHealthy probes the gRPC connection and returns false on errors +// IsHealthy returns the current connection status without blocking. +// Used by the engine to monitor connectivity in the background. 
func (c *GrpcClient) IsHealthy() bool { switch c.conn.GetState() { case connectivity.TransientFailure: @@ -538,12 +545,17 @@ func (c *GrpcClient) IsHealthy() bool { return true } -func (c *GrpcClient) login(serverKey wgtypes.Key, req *proto.LoginRequest) (*proto.LoginResponse, error) { +func (c *GrpcClient) login(req *proto.LoginRequest) (*proto.LoginResponse, error) { if !c.ready() { return nil, errors.New(errMsgNoMgmtConnection) } - loginReq, err := encryption.EncryptMessage(serverKey, c.key, req) + serverKey, err := c.getServerPublicKey() + if err != nil { + return nil, err + } + + loginReq, err := encryption.EncryptMessage(*serverKey, c.key, req) if err != nil { log.Errorf("failed to encrypt message: %s", err) return nil, err @@ -577,7 +589,7 @@ func (c *GrpcClient) login(serverKey wgtypes.Key, req *proto.LoginRequest) (*pro } loginResp := &proto.LoginResponse{} - err = encryption.DecryptMessage(serverKey, c.key, resp.Body, loginResp) + err = encryption.DecryptMessage(*serverKey, c.key, resp.Body, loginResp) if err != nil { log.Errorf("failed to decrypt login response: %s", err) return nil, err @@ -589,34 +601,40 @@ func (c *GrpcClient) login(serverKey wgtypes.Key, req *proto.LoginRequest) (*pro // Register registers peer on Management Server. It actually calls a Login endpoint with a provided setup key // Takes care of encrypting and decrypting messages. // This method will also collect system info and send it with the request (e.g. 
hostname, os, etc) -func (c *GrpcClient) Register(serverKey wgtypes.Key, setupKey string, jwtToken string, sysInfo *system.Info, pubSSHKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) { +func (c *GrpcClient) Register(setupKey string, jwtToken string, sysInfo *system.Info, pubSSHKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) { keys := &proto.PeerKeys{ SshPubKey: pubSSHKey, WgPubKey: []byte(c.key.PublicKey().String()), } - return c.login(serverKey, &proto.LoginRequest{SetupKey: setupKey, Meta: infoToMetaData(sysInfo), JwtToken: jwtToken, PeerKeys: keys, DnsLabels: dnsLabels.ToPunycodeList()}) + return c.login(&proto.LoginRequest{SetupKey: setupKey, Meta: infoToMetaData(sysInfo), JwtToken: jwtToken, PeerKeys: keys, DnsLabels: dnsLabels.ToPunycodeList()}) } // Login attempts login to Management Server. Takes care of encrypting and decrypting messages. -func (c *GrpcClient) Login(serverKey wgtypes.Key, sysInfo *system.Info, pubSSHKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) { +func (c *GrpcClient) Login(sysInfo *system.Info, pubSSHKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) { keys := &proto.PeerKeys{ SshPubKey: pubSSHKey, WgPubKey: []byte(c.key.PublicKey().String()), } - return c.login(serverKey, &proto.LoginRequest{Meta: infoToMetaData(sysInfo), PeerKeys: keys, DnsLabels: dnsLabels.ToPunycodeList()}) + return c.login(&proto.LoginRequest{Meta: infoToMetaData(sysInfo), PeerKeys: keys, DnsLabels: dnsLabels.ToPunycodeList()}) } // GetDeviceAuthorizationFlow returns a device authorization flow information. // It also takes care of encrypting and decrypting messages. 
-func (c *GrpcClient) GetDeviceAuthorizationFlow(serverKey wgtypes.Key) (*proto.DeviceAuthorizationFlow, error) { +func (c *GrpcClient) GetDeviceAuthorizationFlow() (*proto.DeviceAuthorizationFlow, error) { if !c.ready() { return nil, fmt.Errorf("no connection to management in order to get device authorization flow") } + + serverKey, err := c.getServerPublicKey() + if err != nil { + return nil, err + } + mgmCtx, cancel := context.WithTimeout(c.ctx, time.Second*2) defer cancel() message := &proto.DeviceAuthorizationFlowRequest{} - encryptedMSG, err := encryption.EncryptMessage(serverKey, c.key, message) + encryptedMSG, err := encryption.EncryptMessage(*serverKey, c.key, message) if err != nil { return nil, err } @@ -630,7 +648,7 @@ func (c *GrpcClient) GetDeviceAuthorizationFlow(serverKey wgtypes.Key) (*proto.D } flowInfoResp := &proto.DeviceAuthorizationFlow{} - err = encryption.DecryptMessage(serverKey, c.key, resp.Body, flowInfoResp) + err = encryption.DecryptMessage(*serverKey, c.key, resp.Body, flowInfoResp) if err != nil { errWithMSG := fmt.Errorf("failed to decrypt device authorization flow message: %s", err) log.Error(errWithMSG) @@ -642,15 +660,21 @@ func (c *GrpcClient) GetDeviceAuthorizationFlow(serverKey wgtypes.Key) (*proto.D // GetPKCEAuthorizationFlow returns a pkce authorization flow information. // It also takes care of encrypting and decrypting messages. 
-func (c *GrpcClient) GetPKCEAuthorizationFlow(serverKey wgtypes.Key) (*proto.PKCEAuthorizationFlow, error) { +func (c *GrpcClient) GetPKCEAuthorizationFlow() (*proto.PKCEAuthorizationFlow, error) { if !c.ready() { return nil, fmt.Errorf("no connection to management in order to get pkce authorization flow") } + + serverKey, err := c.getServerPublicKey() + if err != nil { + return nil, err + } + mgmCtx, cancel := context.WithTimeout(c.ctx, time.Second*2) defer cancel() message := &proto.PKCEAuthorizationFlowRequest{} - encryptedMSG, err := encryption.EncryptMessage(serverKey, c.key, message) + encryptedMSG, err := encryption.EncryptMessage(*serverKey, c.key, message) if err != nil { return nil, err } @@ -664,7 +688,7 @@ func (c *GrpcClient) GetPKCEAuthorizationFlow(serverKey wgtypes.Key) (*proto.PKC } flowInfoResp := &proto.PKCEAuthorizationFlow{} - err = encryption.DecryptMessage(serverKey, c.key, resp.Body, flowInfoResp) + err = encryption.DecryptMessage(*serverKey, c.key, resp.Body, flowInfoResp) if err != nil { errWithMSG := fmt.Errorf("failed to decrypt pkce authorization flow message: %s", err) log.Error(errWithMSG) @@ -681,7 +705,7 @@ func (c *GrpcClient) SyncMeta(sysInfo *system.Info) error { return errors.New(errMsgNoMgmtConnection) } - serverPubKey, err := c.GetServerPublicKey() + serverPubKey, err := c.getServerPublicKey() if err != nil { log.Debugf(errMsgMgmtPublicKey, err) return err @@ -724,7 +748,7 @@ func (c *GrpcClient) notifyConnected() { } func (c *GrpcClient) Logout() error { - serverKey, err := c.GetServerPublicKey() + serverKey, err := c.getServerPublicKey() if err != nil { return fmt.Errorf("get server public key: %w", err) } @@ -751,7 +775,7 @@ func (c *GrpcClient) Logout() error { // CreateExpose calls the management server to create a new expose service. 
func (c *GrpcClient) CreateExpose(ctx context.Context, req ExposeRequest) (*ExposeResponse, error) { - serverPubKey, err := c.GetServerPublicKey() + serverPubKey, err := c.getServerPublicKey() if err != nil { return nil, err } @@ -787,7 +811,7 @@ func (c *GrpcClient) CreateExpose(ctx context.Context, req ExposeRequest) (*Expo // RenewExpose extends the TTL of an active expose session on the management server. func (c *GrpcClient) RenewExpose(ctx context.Context, domain string) error { - serverPubKey, err := c.GetServerPublicKey() + serverPubKey, err := c.getServerPublicKey() if err != nil { return err } @@ -810,7 +834,7 @@ func (c *GrpcClient) RenewExpose(ctx context.Context, domain string) error { // StopExpose terminates an active expose session on the management server. func (c *GrpcClient) StopExpose(ctx context.Context, domain string) error { - serverPubKey, err := c.GetServerPublicKey() + serverPubKey, err := c.getServerPublicKey() if err != nil { return err } diff --git a/shared/management/client/mock.go b/shared/management/client/mock.go index 548e379e8..361e8ffad 100644 --- a/shared/management/client/mock.go +++ b/shared/management/client/mock.go @@ -3,8 +3,6 @@ package client import ( "context" - "golang.zx2c4.com/wireguard/wgctrl/wgtypes" - "github.com/netbirdio/netbird/client/system" "github.com/netbirdio/netbird/shared/management/domain" "github.com/netbirdio/netbird/shared/management/proto" @@ -14,12 +12,12 @@ import ( type MockClient struct { CloseFunc func() error SyncFunc func(ctx context.Context, sysInfo *system.Info, msgHandler func(msg *proto.SyncResponse) error) error - GetServerPublicKeyFunc func() (*wgtypes.Key, error) - RegisterFunc func(serverKey wgtypes.Key, setupKey string, jwtToken string, info *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) - LoginFunc func(serverKey wgtypes.Key, info *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) - GetDeviceAuthorizationFlowFunc 
func(serverKey wgtypes.Key) (*proto.DeviceAuthorizationFlow, error) - GetPKCEAuthorizationFlowFunc func(serverKey wgtypes.Key) (*proto.PKCEAuthorizationFlow, error) + RegisterFunc func(setupKey string, jwtToken string, info *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) + LoginFunc func(info *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) + GetDeviceAuthorizationFlowFunc func() (*proto.DeviceAuthorizationFlow, error) + GetPKCEAuthorizationFlowFunc func() (*proto.PKCEAuthorizationFlow, error) GetServerURLFunc func() string + HealthCheckFunc func() error SyncMetaFunc func(sysInfo *system.Info) error LogoutFunc func() error JobFunc func(ctx context.Context, msgHandler func(msg *proto.JobRequest) *proto.JobResponse) error @@ -53,39 +51,39 @@ func (m *MockClient) Job(ctx context.Context, msgHandler func(msg *proto.JobRequ return m.JobFunc(ctx, msgHandler) } -func (m *MockClient) GetServerPublicKey() (*wgtypes.Key, error) { - if m.GetServerPublicKeyFunc == nil { - return nil, nil - } - return m.GetServerPublicKeyFunc() -} - -func (m *MockClient) Register(serverKey wgtypes.Key, setupKey string, jwtToken string, info *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) { +func (m *MockClient) Register(setupKey string, jwtToken string, info *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) { if m.RegisterFunc == nil { return nil, nil } - return m.RegisterFunc(serverKey, setupKey, jwtToken, info, sshKey, dnsLabels) + return m.RegisterFunc(setupKey, jwtToken, info, sshKey, dnsLabels) } -func (m *MockClient) Login(serverKey wgtypes.Key, info *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) { +func (m *MockClient) Login(info *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) { if m.LoginFunc == nil { return nil, nil } - return m.LoginFunc(serverKey, info, sshKey, dnsLabels) + return 
m.LoginFunc(info, sshKey, dnsLabels) } -func (m *MockClient) GetDeviceAuthorizationFlow(serverKey wgtypes.Key) (*proto.DeviceAuthorizationFlow, error) { +func (m *MockClient) GetDeviceAuthorizationFlow() (*proto.DeviceAuthorizationFlow, error) { if m.GetDeviceAuthorizationFlowFunc == nil { return nil, nil } - return m.GetDeviceAuthorizationFlowFunc(serverKey) + return m.GetDeviceAuthorizationFlowFunc() } -func (m *MockClient) GetPKCEAuthorizationFlow(serverKey wgtypes.Key) (*proto.PKCEAuthorizationFlow, error) { +func (m *MockClient) GetPKCEAuthorizationFlow() (*proto.PKCEAuthorizationFlow, error) { if m.GetPKCEAuthorizationFlowFunc == nil { return nil, nil } - return m.GetPKCEAuthorizationFlowFunc(serverKey) + return m.GetPKCEAuthorizationFlowFunc() +} + +func (m *MockClient) HealthCheck() error { + if m.HealthCheckFunc == nil { + return nil + } + return m.HealthCheckFunc() } // GetNetworkMap mock implementation of GetNetworkMap from Client interface. diff --git a/shared/management/client/rest/azure_idp.go b/shared/management/client/rest/azure_idp.go new file mode 100644 index 000000000..40b90bc30 --- /dev/null +++ b/shared/management/client/rest/azure_idp.go @@ -0,0 +1,112 @@ +package rest + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/netbirdio/netbird/shared/management/http/api" +) + +// AzureIDPAPI APIs for Azure AD IDP integrations +type AzureIDPAPI struct { + c *Client +} + +// List retrieves all Azure AD IDP integrations +func (a *AzureIDPAPI) List(ctx context.Context) ([]api.AzureIntegration, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/integrations/azure-idp", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[[]api.AzureIntegration](resp) + return ret, err +} + +// Get retrieves a specific Azure AD IDP integration by ID +func (a *AzureIDPAPI) Get(ctx context.Context, integrationID string) (*api.AzureIntegration, error) { + resp, err := 
a.c.NewRequest(ctx, "GET", "/api/integrations/azure-idp/"+integrationID, nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.AzureIntegration](resp) + return &ret, err +} + +// Create creates a new Azure AD IDP integration +func (a *AzureIDPAPI) Create(ctx context.Context, request api.CreateAzureIntegrationRequest) (*api.AzureIntegration, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "POST", "/api/integrations/azure-idp", bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.AzureIntegration](resp) + return &ret, err +} + +// Update updates an existing Azure AD IDP integration +func (a *AzureIDPAPI) Update(ctx context.Context, integrationID string, request api.UpdateAzureIntegrationRequest) (*api.AzureIntegration, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "PUT", "/api/integrations/azure-idp/"+integrationID, bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.AzureIntegration](resp) + return &ret, err +} + +// Delete deletes an Azure AD IDP integration +func (a *AzureIDPAPI) Delete(ctx context.Context, integrationID string) error { + resp, err := a.c.NewRequest(ctx, "DELETE", "/api/integrations/azure-idp/"+integrationID, nil, nil) + if err != nil { + return err + } + if resp.Body != nil { + defer resp.Body.Close() + } + return nil +} + +// Sync triggers a manual sync for an Azure AD IDP integration +func (a *AzureIDPAPI) Sync(ctx context.Context, integrationID string) (*api.SyncResult, error) { + resp, err := a.c.NewRequest(ctx, "POST", "/api/integrations/azure-idp/"+integrationID+"/sync", nil, nil) + 
if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.SyncResult](resp) + return &ret, err +} + +// GetLogs retrieves synchronization logs for an Azure AD IDP integration +func (a *AzureIDPAPI) GetLogs(ctx context.Context, integrationID string) ([]api.IdpIntegrationSyncLog, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/integrations/azure-idp/"+integrationID+"/logs", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[[]api.IdpIntegrationSyncLog](resp) + return ret, err +} diff --git a/shared/management/client/rest/azure_idp_test.go b/shared/management/client/rest/azure_idp_test.go new file mode 100644 index 000000000..480d2a313 --- /dev/null +++ b/shared/management/client/rest/azure_idp_test.go @@ -0,0 +1,252 @@ +//go:build integration + +package rest_test + +import ( + "context" + "encoding/json" + "io" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/shared/management/client/rest" + "github.com/netbirdio/netbird/shared/management/http/api" + "github.com/netbirdio/netbird/shared/management/http/util" +) + +var testAzureIntegration = api.AzureIntegration{ + Id: 1, + Enabled: true, + ClientId: "12345678-1234-1234-1234-123456789012", + TenantId: "87654321-4321-4321-4321-210987654321", + SyncInterval: 300, + GroupPrefixes: []string{"eng-"}, + UserGroupPrefixes: []string{"dev-"}, + Host: "microsoft.com", + LastSyncedAt: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), +} + +func TestAzureIDP_List_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/azure-idp", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + retBytes, _ := json.Marshal([]api.AzureIntegration{testAzureIntegration}) + _, err := w.Write(retBytes) + 
require.NoError(t, err) + }) + ret, err := c.AzureIDP.List(context.Background()) + require.NoError(t, err) + assert.Len(t, ret, 1) + assert.Equal(t, testAzureIntegration, ret[0]) + }) +} + +func TestAzureIDP_List_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/azure-idp", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.AzureIDP.List(context.Background()) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Empty(t, ret) + }) +} + +func TestAzureIDP_Get_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/azure-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + retBytes, _ := json.Marshal(testAzureIntegration) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.AzureIDP.Get(context.Background(), "int-1") + require.NoError(t, err) + assert.Equal(t, testAzureIntegration, *ret) + }) +} + +func TestAzureIDP_Get_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/azure-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.AzureIDP.Get(context.Background(), "int-1") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestAzureIDP_Create_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/azure-idp", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + reqBytes, err := io.ReadAll(r.Body) + 
require.NoError(t, err) + var req api.CreateAzureIntegrationRequest + err = json.Unmarshal(reqBytes, &req) + require.NoError(t, err) + assert.Equal(t, "12345678-1234-1234-1234-123456789012", req.ClientId) + retBytes, _ := json.Marshal(testAzureIntegration) + _, err = w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.AzureIDP.Create(context.Background(), api.CreateAzureIntegrationRequest{ + ClientId: "12345678-1234-1234-1234-123456789012", + ClientSecret: "secret", + TenantId: "87654321-4321-4321-4321-210987654321", + Host: api.CreateAzureIntegrationRequestHostMicrosoftCom, + GroupPrefixes: &[]string{"eng-"}, + }) + require.NoError(t, err) + assert.Equal(t, testAzureIntegration, *ret) + }) +} + +func TestAzureIDP_Create_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/azure-idp", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.AzureIDP.Create(context.Background(), api.CreateAzureIntegrationRequest{}) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestAzureIDP_Update_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/azure-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "PUT", r.Method) + reqBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) + var req api.UpdateAzureIntegrationRequest + err = json.Unmarshal(reqBytes, &req) + require.NoError(t, err) + assert.Equal(t, true, *req.Enabled) + retBytes, _ := json.Marshal(testAzureIntegration) + _, err = w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.AzureIDP.Update(context.Background(), "int-1", api.UpdateAzureIntegrationRequest{ + Enabled: ptr(true), + }) + require.NoError(t, err) + assert.Equal(t, 
testAzureIntegration, *ret) + }) +} + +func TestAzureIDP_Update_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/azure-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.AzureIDP.Update(context.Background(), "int-1", api.UpdateAzureIntegrationRequest{}) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestAzureIDP_Delete_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/azure-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "DELETE", r.Method) + w.WriteHeader(200) + }) + err := c.AzureIDP.Delete(context.Background(), "int-1") + require.NoError(t, err) + }) +} + +func TestAzureIDP_Delete_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/azure-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + err := c.AzureIDP.Delete(context.Background(), "int-1") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + }) +} + +func TestAzureIDP_Sync_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/azure-idp/int-1/sync", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + retBytes, _ := json.Marshal(api.SyncResult{Result: ptr("ok")}) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.AzureIDP.Sync(context.Background(), "int-1") + require.NoError(t, err) + assert.Equal(t, "ok", *ret.Result) + }) +} + +func 
TestAzureIDP_Sync_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/azure-idp/int-1/sync", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.AzureIDP.Sync(context.Background(), "int-1") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestAzureIDP_GetLogs_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/azure-idp/int-1/logs", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + retBytes, _ := json.Marshal([]api.IdpIntegrationSyncLog{testSyncLog}) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.AzureIDP.GetLogs(context.Background(), "int-1") + require.NoError(t, err) + assert.Len(t, ret, 1) + assert.Equal(t, testSyncLog, ret[0]) + }) +} + +func TestAzureIDP_GetLogs_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/azure-idp/int-1/logs", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.AzureIDP.GetLogs(context.Background(), "int-1") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + assert.Empty(t, ret) + }) +} diff --git a/shared/management/client/rest/client.go b/shared/management/client/rest/client.go index f308761fb..f0cb4d2d1 100644 --- a/shared/management/client/rest/client.go +++ b/shared/management/client/rest/client.go @@ -110,6 +110,15 @@ type Client struct { // see more: https://docs.netbird.io/api/resources/scim SCIM *SCIMAPI + // GoogleIDP NetBird Google Workspace IDP 
integration APIs + GoogleIDP *GoogleIDPAPI + + // AzureIDP NetBird Azure AD IDP integration APIs + AzureIDP *AzureIDPAPI + + // OktaScimIDP NetBird Okta SCIM IDP integration APIs + OktaScimIDP *OktaScimIDPAPI + // EventStreaming NetBird Event Streaming integration APIs // see more: https://docs.netbird.io/api/resources/event-streaming EventStreaming *EventStreamingAPI @@ -185,6 +194,9 @@ func (c *Client) initialize() { c.MSP = &MSPAPI{c} c.EDR = &EDRAPI{c} c.SCIM = &SCIMAPI{c} + c.GoogleIDP = &GoogleIDPAPI{c} + c.AzureIDP = &AzureIDPAPI{c} + c.OktaScimIDP = &OktaScimIDPAPI{c} c.EventStreaming = &EventStreamingAPI{c} c.IdentityProviders = &IdentityProvidersAPI{c} c.Ingress = &IngressAPI{c} diff --git a/shared/management/client/rest/edr.go b/shared/management/client/rest/edr.go index 7dfc891c2..f9b7f2a88 100644 --- a/shared/management/client/rest/edr.go +++ b/shared/management/client/rest/edr.go @@ -265,6 +265,65 @@ func (a *EDRAPI) DeleteHuntressIntegration(ctx context.Context) error { return nil } +// GetFleetDMIntegration retrieves the EDR FleetDM integration. +func (a *EDRAPI) GetFleetDMIntegration(ctx context.Context) (*api.EDRFleetDMResponse, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/integrations/edr/fleetdm", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.EDRFleetDMResponse](resp) + return &ret, err +} + +// CreateFleetDMIntegration creates a new EDR FleetDM integration. 
+func (a *EDRAPI) CreateFleetDMIntegration(ctx context.Context, request api.EDRFleetDMRequest) (*api.EDRFleetDMResponse, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "POST", "/api/integrations/edr/fleetdm", bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.EDRFleetDMResponse](resp) + return &ret, err +} + +// UpdateFleetDMIntegration updates an existing EDR FleetDM integration. +func (a *EDRAPI) UpdateFleetDMIntegration(ctx context.Context, request api.EDRFleetDMRequest) (*api.EDRFleetDMResponse, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "PUT", "/api/integrations/edr/fleetdm", bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.EDRFleetDMResponse](resp) + return &ret, err +} + +// DeleteFleetDMIntegration deletes the EDR FleetDM integration. 
+func (a *EDRAPI) DeleteFleetDMIntegration(ctx context.Context) error { + resp, err := a.c.NewRequest(ctx, "DELETE", "/api/integrations/edr/fleetdm", nil, nil) + if err != nil { + return err + } + if resp.Body != nil { + defer resp.Body.Close() + } + return nil +} + // BypassPeerCompliance bypasses compliance for a non-compliant peer // See more: https://docs.netbird.io/api/resources/edr#bypass-peer-compliance func (a *EDRAPI) BypassPeerCompliance(ctx context.Context, peerID string) (*api.BypassResponse, error) { diff --git a/shared/management/client/rest/google_idp.go b/shared/management/client/rest/google_idp.go new file mode 100644 index 000000000..b86436503 --- /dev/null +++ b/shared/management/client/rest/google_idp.go @@ -0,0 +1,112 @@ +package rest + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/netbirdio/netbird/shared/management/http/api" +) + +// GoogleIDPAPI APIs for Google Workspace IDP integrations +type GoogleIDPAPI struct { + c *Client +} + +// List retrieves all Google Workspace IDP integrations +func (a *GoogleIDPAPI) List(ctx context.Context) ([]api.GoogleIntegration, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/integrations/google-idp", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[[]api.GoogleIntegration](resp) + return ret, err +} + +// Get retrieves a specific Google Workspace IDP integration by ID +func (a *GoogleIDPAPI) Get(ctx context.Context, integrationID string) (*api.GoogleIntegration, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/integrations/google-idp/"+integrationID, nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.GoogleIntegration](resp) + return &ret, err +} + +// Create creates a new Google Workspace IDP integration +func (a *GoogleIDPAPI) Create(ctx context.Context, request api.CreateGoogleIntegrationRequest) 
(*api.GoogleIntegration, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "POST", "/api/integrations/google-idp", bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.GoogleIntegration](resp) + return &ret, err +} + +// Update updates an existing Google Workspace IDP integration +func (a *GoogleIDPAPI) Update(ctx context.Context, integrationID string, request api.UpdateGoogleIntegrationRequest) (*api.GoogleIntegration, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "PUT", "/api/integrations/google-idp/"+integrationID, bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.GoogleIntegration](resp) + return &ret, err +} + +// Delete deletes a Google Workspace IDP integration +func (a *GoogleIDPAPI) Delete(ctx context.Context, integrationID string) error { + resp, err := a.c.NewRequest(ctx, "DELETE", "/api/integrations/google-idp/"+integrationID, nil, nil) + if err != nil { + return err + } + if resp.Body != nil { + defer resp.Body.Close() + } + return nil +} + +// Sync triggers a manual sync for a Google Workspace IDP integration +func (a *GoogleIDPAPI) Sync(ctx context.Context, integrationID string) (*api.SyncResult, error) { + resp, err := a.c.NewRequest(ctx, "POST", "/api/integrations/google-idp/"+integrationID+"/sync", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.SyncResult](resp) + return &ret, err +} + +// GetLogs retrieves synchronization logs for a Google Workspace IDP integration +func (a *GoogleIDPAPI) GetLogs(ctx context.Context, integrationID string) ([]api.IdpIntegrationSyncLog, error) { + 
resp, err := a.c.NewRequest(ctx, "GET", "/api/integrations/google-idp/"+integrationID+"/logs", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[[]api.IdpIntegrationSyncLog](resp) + return ret, err +} diff --git a/shared/management/client/rest/google_idp_test.go b/shared/management/client/rest/google_idp_test.go new file mode 100644 index 000000000..03a6c161e --- /dev/null +++ b/shared/management/client/rest/google_idp_test.go @@ -0,0 +1,248 @@ +//go:build integration + +package rest_test + +import ( + "context" + "encoding/json" + "io" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/shared/management/client/rest" + "github.com/netbirdio/netbird/shared/management/http/api" + "github.com/netbirdio/netbird/shared/management/http/util" +) + +var testGoogleIntegration = api.GoogleIntegration{ + Id: 1, + Enabled: true, + CustomerId: "C01234567", + SyncInterval: 300, + GroupPrefixes: []string{"eng-"}, + UserGroupPrefixes: []string{"dev-"}, + LastSyncedAt: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), +} + +func TestGoogleIDP_List_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/google-idp", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + retBytes, _ := json.Marshal([]api.GoogleIntegration{testGoogleIntegration}) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.GoogleIDP.List(context.Background()) + require.NoError(t, err) + assert.Len(t, ret, 1) + assert.Equal(t, testGoogleIntegration, ret[0]) + }) +} + +func TestGoogleIDP_List_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/google-idp", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", 
Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.GoogleIDP.List(context.Background()) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Empty(t, ret) + }) +} + +func TestGoogleIDP_Get_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/google-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + retBytes, _ := json.Marshal(testGoogleIntegration) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.GoogleIDP.Get(context.Background(), "int-1") + require.NoError(t, err) + assert.Equal(t, testGoogleIntegration, *ret) + }) +} + +func TestGoogleIDP_Get_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/google-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.GoogleIDP.Get(context.Background(), "int-1") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestGoogleIDP_Create_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/google-idp", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + reqBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) + var req api.CreateGoogleIntegrationRequest + err = json.Unmarshal(reqBytes, &req) + require.NoError(t, err) + assert.Equal(t, "C01234567", req.CustomerId) + retBytes, _ := json.Marshal(testGoogleIntegration) + _, err = w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.GoogleIDP.Create(context.Background(), api.CreateGoogleIntegrationRequest{ + CustomerId: "C01234567", + ServiceAccountKey: "key-data", + 
GroupPrefixes: &[]string{"eng-"}, + }) + require.NoError(t, err) + assert.Equal(t, testGoogleIntegration, *ret) + }) +} + +func TestGoogleIDP_Create_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/google-idp", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.GoogleIDP.Create(context.Background(), api.CreateGoogleIntegrationRequest{}) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestGoogleIDP_Update_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/google-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "PUT", r.Method) + reqBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) + var req api.UpdateGoogleIntegrationRequest + err = json.Unmarshal(reqBytes, &req) + require.NoError(t, err) + assert.Equal(t, true, *req.Enabled) + retBytes, _ := json.Marshal(testGoogleIntegration) + _, err = w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.GoogleIDP.Update(context.Background(), "int-1", api.UpdateGoogleIntegrationRequest{ + Enabled: ptr(true), + }) + require.NoError(t, err) + assert.Equal(t, testGoogleIntegration, *ret) + }) +} + +func TestGoogleIDP_Update_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/google-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.GoogleIDP.Update(context.Background(), "int-1", api.UpdateGoogleIntegrationRequest{}) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + 
}) +} + +func TestGoogleIDP_Delete_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/google-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "DELETE", r.Method) + w.WriteHeader(200) + }) + err := c.GoogleIDP.Delete(context.Background(), "int-1") + require.NoError(t, err) + }) +} + +func TestGoogleIDP_Delete_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/google-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + err := c.GoogleIDP.Delete(context.Background(), "int-1") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + }) +} + +func TestGoogleIDP_Sync_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/google-idp/int-1/sync", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + retBytes, _ := json.Marshal(api.SyncResult{Result: ptr("ok")}) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.GoogleIDP.Sync(context.Background(), "int-1") + require.NoError(t, err) + assert.Equal(t, "ok", *ret.Result) + }) +} + +func TestGoogleIDP_Sync_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/google-idp/int-1/sync", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.GoogleIDP.Sync(context.Background(), "int-1") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestGoogleIDP_GetLogs_200(t *testing.T) { + 
withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/google-idp/int-1/logs", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + retBytes, _ := json.Marshal([]api.IdpIntegrationSyncLog{testSyncLog}) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.GoogleIDP.GetLogs(context.Background(), "int-1") + require.NoError(t, err) + assert.Len(t, ret, 1) + assert.Equal(t, testSyncLog, ret[0]) + }) +} + +func TestGoogleIDP_GetLogs_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/google-idp/int-1/logs", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.GoogleIDP.GetLogs(context.Background(), "int-1") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + assert.Empty(t, ret) + }) +} diff --git a/shared/management/client/rest/okta_scim_idp.go b/shared/management/client/rest/okta_scim_idp.go new file mode 100644 index 000000000..eb677dae8 --- /dev/null +++ b/shared/management/client/rest/okta_scim_idp.go @@ -0,0 +1,112 @@ +package rest + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/netbirdio/netbird/shared/management/http/api" +) + +// OktaScimIDPAPI APIs for Okta SCIM IDP integrations +type OktaScimIDPAPI struct { + c *Client +} + +// List retrieves all Okta SCIM IDP integrations +func (a *OktaScimIDPAPI) List(ctx context.Context) ([]api.OktaScimIntegration, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/integrations/okta-scim-idp", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[[]api.OktaScimIntegration](resp) + return ret, err +} + +// Get retrieves a specific Okta SCIM IDP integration by ID +func (a 
*OktaScimIDPAPI) Get(ctx context.Context, integrationID string) (*api.OktaScimIntegration, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/integrations/okta-scim-idp/"+integrationID, nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.OktaScimIntegration](resp) + return &ret, err +} + +// Create creates a new Okta SCIM IDP integration +func (a *OktaScimIDPAPI) Create(ctx context.Context, request api.CreateOktaScimIntegrationRequest) (*api.OktaScimIntegration, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "POST", "/api/integrations/okta-scim-idp", bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.OktaScimIntegration](resp) + return &ret, err +} + +// Update updates an existing Okta SCIM IDP integration +func (a *OktaScimIDPAPI) Update(ctx context.Context, integrationID string, request api.UpdateOktaScimIntegrationRequest) (*api.OktaScimIntegration, error) { + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, err + } + resp, err := a.c.NewRequest(ctx, "PUT", "/api/integrations/okta-scim-idp/"+integrationID, bytes.NewReader(requestBytes), nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.OktaScimIntegration](resp) + return &ret, err +} + +// Delete deletes an Okta SCIM IDP integration +func (a *OktaScimIDPAPI) Delete(ctx context.Context, integrationID string) error { + resp, err := a.c.NewRequest(ctx, "DELETE", "/api/integrations/okta-scim-idp/"+integrationID, nil, nil) + if err != nil { + return err + } + if resp.Body != nil { + defer resp.Body.Close() + } + return nil +} + +// RegenerateToken regenerates the SCIM API token for an Okta SCIM integration +func (a *OktaScimIDPAPI) 
RegenerateToken(ctx context.Context, integrationID string) (*api.ScimTokenResponse, error) { + resp, err := a.c.NewRequest(ctx, "POST", "/api/integrations/okta-scim-idp/"+integrationID+"/token", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[api.ScimTokenResponse](resp) + return &ret, err +} + +// GetLogs retrieves synchronization logs for an Okta SCIM IDP integration +func (a *OktaScimIDPAPI) GetLogs(ctx context.Context, integrationID string) ([]api.IdpIntegrationSyncLog, error) { + resp, err := a.c.NewRequest(ctx, "GET", "/api/integrations/okta-scim-idp/"+integrationID+"/logs", nil, nil) + if err != nil { + return nil, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + ret, err := parseResponse[[]api.IdpIntegrationSyncLog](resp) + return ret, err +} diff --git a/shared/management/client/rest/okta_scim_idp_test.go b/shared/management/client/rest/okta_scim_idp_test.go new file mode 100644 index 000000000..d8d1f2b51 --- /dev/null +++ b/shared/management/client/rest/okta_scim_idp_test.go @@ -0,0 +1,246 @@ +//go:build integration + +package rest_test + +import ( + "context" + "encoding/json" + "io" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/shared/management/client/rest" + "github.com/netbirdio/netbird/shared/management/http/api" + "github.com/netbirdio/netbird/shared/management/http/util" +) + +var testOktaScimIntegration = api.OktaScimIntegration{ + Id: 1, + AuthToken: "****", + Enabled: true, + GroupPrefixes: []string{"eng-"}, + UserGroupPrefixes: []string{"dev-"}, + LastSyncedAt: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), +} + +func TestOktaScimIDP_List_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/okta-scim-idp", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", 
r.Method) + retBytes, _ := json.Marshal([]api.OktaScimIntegration{testOktaScimIntegration}) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.OktaScimIDP.List(context.Background()) + require.NoError(t, err) + assert.Len(t, ret, 1) + assert.Equal(t, testOktaScimIntegration, ret[0]) + }) +} + +func TestOktaScimIDP_List_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/okta-scim-idp", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.OktaScimIDP.List(context.Background()) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Empty(t, ret) + }) +} + +func TestOktaScimIDP_Get_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/okta-scim-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + retBytes, _ := json.Marshal(testOktaScimIntegration) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.OktaScimIDP.Get(context.Background(), "int-1") + require.NoError(t, err) + assert.Equal(t, testOktaScimIntegration, *ret) + }) +} + +func TestOktaScimIDP_Get_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/okta-scim-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.OktaScimIDP.Get(context.Background(), "int-1") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestOktaScimIDP_Create_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + 
mux.HandleFunc("/api/integrations/okta-scim-idp", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + reqBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) + var req api.CreateOktaScimIntegrationRequest + err = json.Unmarshal(reqBytes, &req) + require.NoError(t, err) + assert.Equal(t, "my-okta-connection", req.ConnectionName) + retBytes, _ := json.Marshal(testOktaScimIntegration) + _, err = w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.OktaScimIDP.Create(context.Background(), api.CreateOktaScimIntegrationRequest{ + ConnectionName: "my-okta-connection", + GroupPrefixes: &[]string{"eng-"}, + }) + require.NoError(t, err) + assert.Equal(t, testOktaScimIntegration, *ret) + }) +} + +func TestOktaScimIDP_Create_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/okta-scim-idp", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.OktaScimIDP.Create(context.Background(), api.CreateOktaScimIntegrationRequest{}) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestOktaScimIDP_Update_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/okta-scim-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "PUT", r.Method) + reqBytes, err := io.ReadAll(r.Body) + require.NoError(t, err) + var req api.UpdateOktaScimIntegrationRequest + err = json.Unmarshal(reqBytes, &req) + require.NoError(t, err) + assert.Equal(t, true, *req.Enabled) + retBytes, _ := json.Marshal(testOktaScimIntegration) + _, err = w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.OktaScimIDP.Update(context.Background(), "int-1", api.UpdateOktaScimIntegrationRequest{ + Enabled: 
ptr(true), + }) + require.NoError(t, err) + assert.Equal(t, testOktaScimIntegration, *ret) + }) +} + +func TestOktaScimIDP_Update_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/okta-scim-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) + w.WriteHeader(400) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.OktaScimIDP.Update(context.Background(), "int-1", api.UpdateOktaScimIntegrationRequest{}) + assert.Error(t, err) + assert.Equal(t, "No", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestOktaScimIDP_Delete_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/okta-scim-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "DELETE", r.Method) + w.WriteHeader(200) + }) + err := c.OktaScimIDP.Delete(context.Background(), "int-1") + require.NoError(t, err) + }) +} + +func TestOktaScimIDP_Delete_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/okta-scim-idp/int-1", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + err := c.OktaScimIDP.Delete(context.Background(), "int-1") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + }) +} + +func TestOktaScimIDP_RegenerateToken_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/okta-scim-idp/int-1/token", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "POST", r.Method) + retBytes, _ := json.Marshal(testScimToken) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.OktaScimIDP.RegenerateToken(context.Background(), 
"int-1") + require.NoError(t, err) + assert.Equal(t, testScimToken, *ret) + }) +} + +func TestOktaScimIDP_RegenerateToken_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/okta-scim-idp/int-1/token", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.OktaScimIDP.RegenerateToken(context.Background(), "int-1") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + assert.Nil(t, ret) + }) +} + +func TestOktaScimIDP_GetLogs_200(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/okta-scim-idp/int-1/logs", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + retBytes, _ := json.Marshal([]api.IdpIntegrationSyncLog{testSyncLog}) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.OktaScimIDP.GetLogs(context.Background(), "int-1") + require.NoError(t, err) + assert.Len(t, ret, 1) + assert.Equal(t, testSyncLog, ret[0]) + }) +} + +func TestOktaScimIDP_GetLogs_Err(t *testing.T) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { + mux.HandleFunc("/api/integrations/okta-scim-idp/int-1/logs", func(w http.ResponseWriter, r *http.Request) { + retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) + w.WriteHeader(404) + _, err := w.Write(retBytes) + require.NoError(t, err) + }) + ret, err := c.OktaScimIDP.GetLogs(context.Background(), "int-1") + assert.Error(t, err) + assert.Equal(t, "Not found", err.Error()) + assert.Empty(t, ret) + }) +} diff --git a/shared/management/http/api/openapi.yml b/shared/management/http/api/openapi.yml index 14cb5a503..d0e3413d4 100644 --- a/shared/management/http/api/openapi.yml +++ b/shared/management/http/api/openapi.yml @@ -68,8 +68,17 @@ tags: - 
name: MSP description: MSP portal for Tenant management. x-cloud-only: true - - name: IDP - description: Manage identity provider integrations for user and group sync. + - name: IDP SCIM Integrations + description: Manage generic SCIM identity provider integrations for user and group sync. + x-cloud-only: true + - name: IDP Google Integrations + description: Manage Google Workspace identity provider integrations for user and group sync. + x-cloud-only: true + - name: IDP Azure Integrations + description: Manage Azure AD identity provider integrations for user and group sync. + x-cloud-only: true + - name: IDP Okta SCIM Integrations + description: Manage Okta SCIM identity provider integrations for user and group sync. x-cloud-only: true - name: EDR Intune Integrations description: Manage Microsoft Intune EDR integrations. @@ -83,12 +92,19 @@ tags: - name: EDR Huntress Integrations description: Manage Huntress EDR integrations. x-cloud-only: true + - name: EDR FleetDM Integrations + description: Manage FleetDM EDR integrations. + x-cloud-only: true - name: EDR Peers description: Manage EDR compliance bypass for peers. x-cloud-only: true - name: Event Streaming Integrations description: Manage event streaming integrations. x-cloud-only: true + - name: Notifications + description: Manage notification channels for account event alerts. + x-cloud-only: true + components: schemas: @@ -2995,6 +3011,11 @@ components: type: boolean description: Whether the service is enabled example: true + terminated: + type: boolean + description: Whether the service has been terminated. Terminated services cannot be updated. Services that violate the Terms of Service will be terminated. + readOnly: true + example: false pass_host_header: type: boolean description: When true, the original client Host header is passed through to the backend instead of being rewritten to the backend's address @@ -4318,75 +4339,129 @@ components: description: Status of agent firewall. 
Can be one of Disabled, Enabled, Pending Isolation, Isolated, Pending Release. example: "Enabled" - CreateScimIntegrationRequest: + EDRFleetDMRequest: type: object - description: Request payload for creating an SCIM IDP integration - required: - - prefix - - provider + description: Request payload for creating or updating a FleetDM EDR integration properties: - prefix: + api_url: type: string - description: The connection prefix used for the SCIM provider - provider: + description: FleetDM server URL + api_token: type: string - description: Name of the SCIM identity provider - group_prefixes: + description: FleetDM API token + groups: type: array - description: List of start_with string patterns for groups to sync + description: The Groups this integrations applies to items: type: string - example: [ "Engineering", "Sales" ] - user_group_prefixes: - type: array - description: List of start_with string patterns for groups which users to sync - items: - type: string - example: [ "Users" ] - UpdateScimIntegrationRequest: - type: object - description: Request payload for updating an SCIM IDP integration - properties: + last_synced_interval: + type: integer + description: The devices last sync requirement interval in hours. 
Minimum value is 24 hours + minimum: 24 enabled: type: boolean description: Indicates whether the integration is enabled - example: true - group_prefixes: - type: array - description: List of start_with string patterns for groups to sync - items: - type: string - example: [ "Engineering", "Sales" ] - user_group_prefixes: - type: array - description: List of start_with string patterns for groups which users to sync - items: - type: string - example: [ "Users" ] - ScimIntegration: + default: true + match_attributes: + $ref: '#/components/schemas/FleetDMMatchAttributes' + required: + - api_url + - api_token + - groups + - last_synced_interval + - match_attributes + EDRFleetDMResponse: type: object - description: Represents a SCIM IDP integration + description: Represents a FleetDM EDR integration configuration required: - id - - enabled - - provider - - group_prefixes - - user_group_prefixes - - auth_token + - account_id + - api_url + - created_by - last_synced_at + - created_at + - updated_at + - groups + - last_synced_interval + - match_attributes + - enabled properties: id: type: integer format: int64 - description: The unique identifier for the integration + description: The unique numeric identifier for the integration. example: 123 + account_id: + type: string + description: The identifier of the account this integration belongs to. + example: "ch8i4ug6lnn4g9hqv7l0" + api_url: + type: string + description: FleetDM server URL + last_synced_at: + type: string + format: date-time + description: Timestamp of when the integration was last synced. + example: "2023-05-15T10:30:00Z" + created_by: + type: string + description: The user id that created the integration + created_at: + type: string + format: date-time + description: Timestamp of when the integration was created. + example: "2023-05-15T10:30:00Z" + updated_at: + type: string + format: date-time + description: Timestamp of when the integration was last updated. 
+ example: "2023-05-16T11:45:00Z" + groups: + type: array + description: List of groups + items: + $ref: '#/components/schemas/Group' + last_synced_interval: + type: integer + description: The devices last sync requirement interval in hours. enabled: type: boolean description: Indicates whether the integration is enabled - example: true - provider: - type: string - description: Name of the SCIM identity provider + default: true + match_attributes: + $ref: '#/components/schemas/FleetDMMatchAttributes' + + FleetDMMatchAttributes: + type: object + description: Attribute conditions to match when approving FleetDM hosts. Most attributes work with FleetDM's free/open-source version. Premium-only attributes are marked accordingly + additionalProperties: false + properties: + disk_encryption_enabled: + type: boolean + description: Whether disk encryption (FileVault/BitLocker) must be enabled on the host + failing_policies_count_max: + type: integer + description: Maximum number of allowed failing policies. Use 0 to require all policies to pass + minimum: 0 + example: 0 + vulnerable_software_count_max: + type: integer + description: Maximum number of allowed vulnerable software on the host + minimum: 0 + example: 0 + status_online: + type: boolean + description: Whether the host must be online (recently seen by Fleet) + required_policies: + type: array + description: List of FleetDM policy IDs that must be passing on the host. 
If any of these policies is failing, the host is non-compliant + items: + type: integer + example: [1, 5, 12] + + IntegrationSyncFilters: + type: object + properties: group_prefixes: type: array description: List of start_with string patterns for groups to sync @@ -4399,15 +4474,77 @@ components: items: type: string example: [ "Users" ] - auth_token: + connector_id: type: string - description: SCIM API token (full on creation, masked otherwise) - example: "nbs_abc***********************************" - last_synced_at: - type: string - format: date-time - description: Timestamp of when the integration was last synced - example: "2023-05-15T10:30:00Z" + description: DEX connector ID for embedded IDP setups + IntegrationEnabled: + type: object + properties: + enabled: + type: boolean + description: Whether the integration is enabled + example: true + CreateScimIntegrationRequest: + allOf: + - $ref: '#/components/schemas/IntegrationSyncFilters' + - type: object + description: Request payload for creating an SCIM IDP integration + required: + - prefix + - provider + properties: + prefix: + type: string + description: The connection prefix used for the SCIM provider + provider: + type: string + description: Name of the SCIM identity provider + UpdateScimIntegrationRequest: + allOf: + - $ref: '#/components/schemas/IntegrationEnabled' + - $ref: '#/components/schemas/IntegrationSyncFilters' + - type: object + description: Request payload for updating an SCIM IDP integration + properties: + prefix: + type: string + description: The connection prefix used for the SCIM provider + ScimIntegration: + allOf: + - $ref: '#/components/schemas/IntegrationEnabled' + - $ref: '#/components/schemas/IntegrationSyncFilters' + - type: object + description: Represents a SCIM IDP integration + required: + - id + - enabled + - prefix + - provider + - group_prefixes + - user_group_prefixes + - auth_token + - last_synced_at + properties: + id: + type: integer + format: int64 + description: The 
unique identifier for the integration + example: 123 + prefix: + type: string + description: The connection prefix used for the SCIM provider + provider: + type: string + description: Name of the SCIM identity provider + auth_token: + type: string + description: SCIM API token (full on creation, masked otherwise) + example: "nbs_abc***********************************" + last_synced_at: + type: string + format: date-time + description: Timestamp of when the integration was last synced + example: "2023-05-15T10:30:00Z" IdpIntegrationSyncLog: type: object description: Represents a synchronization log entry for an integration @@ -4445,6 +4582,346 @@ components: type: string description: The newly generated SCIM API token example: "nbs_F3f0d..." + CreateGoogleIntegrationRequest: + allOf: + - $ref: '#/components/schemas/IntegrationSyncFilters' + - type: object + description: Request payload for creating a Google Workspace IDP integration + required: + - service_account_key + - customer_id + properties: + service_account_key: + type: string + description: Base64-encoded Google service account key + example: "eyJ0eXBlIjoic2VydmljZV9hY2NvdW50Ii..." + customer_id: + type: string + description: Customer ID from Google Workspace Account Settings + example: "C01234567" + sync_interval: + type: integer + description: Sync interval in seconds (minimum 300). Defaults to 300 if not specified. + minimum: 300 + example: 300 + UpdateGoogleIntegrationRequest: + allOf: + - $ref: '#/components/schemas/IntegrationEnabled' + - $ref: '#/components/schemas/IntegrationSyncFilters' + - type: object + description: Request payload for updating a Google Workspace IDP integration. All fields are optional. 
+ properties: + service_account_key: + type: string + description: Base64-encoded Google service account key + customer_id: + type: string + description: Customer ID from Google Workspace Account Settings + sync_interval: + type: integer + description: Sync interval in seconds (minimum 300) + minimum: 300 + GoogleIntegration: + allOf: + - $ref: '#/components/schemas/IntegrationEnabled' + - $ref: '#/components/schemas/IntegrationSyncFilters' + - type: object + description: Represents a Google Workspace IDP integration + required: + - id + - customer_id + - sync_interval + - enabled + - group_prefixes + - user_group_prefixes + - last_synced_at + properties: + id: + type: integer + format: int64 + description: The unique identifier for the integration + example: 1 + customer_id: + type: string + description: Customer ID from Google Workspace + example: "C01234567" + sync_interval: + type: integer + description: Sync interval in seconds + example: 300 + last_synced_at: + type: string + format: date-time + description: Timestamp of the last synchronization + example: "2023-05-15T10:30:00Z" + CreateAzureIntegrationRequest: + allOf: + - $ref: '#/components/schemas/IntegrationSyncFilters' + - type: object + description: Request payload for creating an Azure AD IDP integration + required: + - client_secret + - client_id + - tenant_id + - host + properties: + client_secret: + type: string + description: Base64-encoded Azure AD client secret + example: "c2VjcmV0..." + client_id: + type: string + description: Azure AD application (client) ID + example: "12345678-1234-1234-1234-123456789012" + tenant_id: + type: string + description: Azure AD tenant ID + example: "87654321-4321-4321-4321-210987654321" + sync_interval: + type: integer + description: Sync interval in seconds (minimum 300). Defaults to 300 if not specified. 
+ minimum: 300 + example: 300 + host: + type: string + description: Azure host domain for the Graph API + enum: + - microsoft.com + - microsoft.us + example: "microsoft.com" + UpdateAzureIntegrationRequest: + allOf: + - $ref: '#/components/schemas/IntegrationEnabled' + - $ref: '#/components/schemas/IntegrationSyncFilters' + - type: object + description: Request payload for updating an Azure AD IDP integration. All fields are optional. + properties: + client_secret: + type: string + description: Base64-encoded Azure AD client secret + client_id: + type: string + description: Azure AD application (client) ID + tenant_id: + type: string + description: Azure AD tenant ID + sync_interval: + type: integer + description: Sync interval in seconds (minimum 300) + minimum: 300 + AzureIntegration: + allOf: + - $ref: '#/components/schemas/IntegrationEnabled' + - $ref: '#/components/schemas/IntegrationSyncFilters' + - type: object + description: Represents an Azure AD IDP integration + required: + - id + - client_id + - tenant_id + - sync_interval + - enabled + - group_prefixes + - user_group_prefixes + - host + - last_synced_at + properties: + id: + type: integer + format: int64 + description: The unique identifier for the integration + example: 1 + client_id: + type: string + description: Azure AD application (client) ID + example: "12345678-1234-1234-1234-123456789012" + tenant_id: + type: string + description: Azure AD tenant ID + example: "87654321-4321-4321-4321-210987654321" + sync_interval: + type: integer + description: Sync interval in seconds + example: 300 + host: + type: string + description: Azure host domain for the Graph API + example: "microsoft.com" + last_synced_at: + type: string + format: date-time + description: Timestamp of the last synchronization + example: "2023-05-15T10:30:00Z" + CreateOktaScimIntegrationRequest: + allOf: + - $ref: '#/components/schemas/IntegrationSyncFilters' + - type: object + description: Request payload for creating an Okta SCIM 
IDP integration + required: + - connection_name + properties: + connection_name: + type: string + description: The Okta enterprise connection name on Auth0 + example: "my-okta-connection" + UpdateOktaScimIntegrationRequest: + allOf: + - $ref: '#/components/schemas/IntegrationEnabled' + - $ref: '#/components/schemas/IntegrationSyncFilters' + - type: object + description: Request payload for updating an Okta SCIM IDP integration. All fields are optional. + OktaScimIntegration: + allOf: + - $ref: '#/components/schemas/IntegrationEnabled' + - $ref: '#/components/schemas/IntegrationSyncFilters' + - type: object + description: Represents an Okta SCIM IDP integration + required: + - id + - enabled + - group_prefixes + - user_group_prefixes + - auth_token + - last_synced_at + properties: + id: + type: integer + format: int64 + description: The unique identifier for the integration + example: 1 + auth_token: + type: string + description: SCIM API token (full on creation/regeneration, masked on retrieval) + example: "nbs_abc***********************************" + last_synced_at: + type: string + format: date-time + description: Timestamp of the last synchronization + example: "2023-05-15T10:30:00Z" + SyncResult: + type: object + description: Response for a manual sync trigger + properties: + result: + type: string + example: "ok" + NotificationChannelType: + type: string + description: The type of notification channel. + enum: + - email + - webhook + example: "email" + NotificationEventType: + type: string + description: | + An activity event type code. See `GET /api/integrations/notifications/types` for the full list + of supported event types and their human-readable descriptions. + example: "user.join" + EmailTarget: + type: object + description: Target configuration for email notification channels. + properties: + emails: + type: array + description: List of email addresses to send notifications to. 
+ minItems: 1 + items: + type: string + format: email + example: [ "admin@example.com", "ops@example.com" ] + required: + - emails + WebhookTarget: + type: object + description: Target configuration for webhook notification channels. + properties: + url: + type: string + format: uri + description: The webhook endpoint URL to send notifications to. + example: "https://hooks.example.com/netbird" + headers: + type: object + additionalProperties: + type: string + description: | + Custom HTTP headers sent with each webhook request. + Values are write-only; in GET responses all values are masked. + example: + Authorization: "Bearer token" + X-Webhook-Secret: "secret" + required: + - url + NotificationChannelRequest: + type: object + description: Request body for creating or updating a notification channel. + properties: + type: + $ref: '#/components/schemas/NotificationChannelType' + target: + description: | + Channel-specific target configuration. The shape depends on the `type` field: + - `email`: requires an `EmailTarget` object + - `webhook`: requires a `WebhookTarget` object + oneOf: + - $ref: '#/components/schemas/EmailTarget' + - $ref: '#/components/schemas/WebhookTarget' + event_types: + type: array + description: List of activity event type codes this channel subscribes to. + items: + $ref: '#/components/schemas/NotificationEventType' + example: [ "user.join", "peer.user.add", "peer.login.expire" ] + enabled: + type: boolean + description: Whether this notification channel is active. + example: true + required: + - type + - event_types + - enabled + NotificationChannelResponse: + type: object + description: A notification channel configuration. + properties: + id: + type: string + description: Unique identifier of the notification channel. + readOnly: true + example: "ch8i4ug6lnn4g9hqv7m0" + type: + $ref: '#/components/schemas/NotificationChannelType' + target: + description: | + Channel-specific target configuration. 
The shape depends on the `type` field: + - `email`: an `EmailTarget` object + - `webhook`: a `WebhookTarget` object + oneOf: + - $ref: '#/components/schemas/EmailTarget' + - $ref: '#/components/schemas/WebhookTarget' + event_types: + type: array + description: List of activity event type codes this channel subscribes to. + items: + $ref: '#/components/schemas/NotificationEventType' + example: [ "user.join", "peer.user.add", "peer.login.expire" ] + enabled: + type: boolean + description: Whether this notification channel is active. + example: true + required: + - id + - type + - event_types + - enabled + NotificationTypeEntry: + type: object + description: A map of event type codes to their human-readable descriptions. + additionalProperties: + type: string + example: + user.join: "User joined" BypassResponse: type: object description: Response for bypassed peer operations. @@ -9081,10 +9558,877 @@ paths: schema: $ref: '#/components/schemas/ErrorResponse' + /api/integrations/google-idp: + post: + tags: + - IDP Google Integrations + summary: Create Google IDP Integration + description: Creates a new Google Workspace IDP integration + operationId: createGoogleIntegration + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateGoogleIntegrationRequest' + responses: + '200': + description: Integration created successfully. Returns the created integration. + content: + application/json: + schema: + $ref: '#/components/schemas/GoogleIntegration' + '400': + description: Bad Request (e.g., invalid JSON, missing required fields, validation error). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized (e.g., missing or invalid authentication token). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + get: + tags: + - IDP Google Integrations + summary: Get All Google IDP Integrations + description: Retrieves all Google Workspace IDP integrations for the authenticated account + operationId: getAllGoogleIntegrations + responses: + '200': + description: A list of Google IDP integrations. + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/GoogleIntegration' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /api/integrations/google-idp/{id}: + parameters: + - name: id + in: path + required: true + description: The unique identifier of the Google IDP integration. + schema: + type: integer + format: int64 + example: 1 + get: + tags: + - IDP Google Integrations + summary: Get Google IDP Integration + description: Retrieves a Google IDP integration by ID. + operationId: getGoogleIntegration + responses: + '200': + description: Successfully retrieved the integration details. + content: + application/json: + schema: + $ref: '#/components/schemas/GoogleIntegration' + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found (e.g., integration with the given ID does not exist). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + put: + tags: + - IDP Google Integrations + summary: Update Google IDP Integration + description: Updates an existing Google Workspace IDP integration. + operationId: updateGoogleIntegration + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UpdateGoogleIntegrationRequest' + responses: + '200': + description: Integration updated successfully. Returns the updated integration. + content: + application/json: + schema: + $ref: '#/components/schemas/GoogleIntegration' + '400': + description: Bad Request (e.g., invalid JSON, validation error, invalid ID). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + delete: + tags: + - IDP Google Integrations + summary: Delete Google IDP Integration + description: Deletes a Google IDP integration by ID. + operationId: deleteGoogleIntegration + responses: + '200': + description: Integration deleted successfully. Returns an empty object. + content: + application/json: + schema: + type: object + example: { } + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /api/integrations/google-idp/{id}/sync: + parameters: + - name: id + in: path + required: true + description: The unique identifier of the Google IDP integration. + schema: + type: integer + format: int64 + example: 1 + post: + tags: + - IDP Google Integrations + summary: Sync Google IDP Integration + description: Triggers a manual synchronization for a Google IDP integration. + operationId: syncGoogleIntegration + responses: + '200': + description: Sync triggered successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/SyncResult' + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /api/integrations/google-idp/{id}/logs: + parameters: + - name: id + in: path + required: true + description: The unique identifier of the Google IDP integration. + schema: + type: integer + format: int64 + example: 1 + get: + tags: + - IDP Google Integrations + summary: Get Google Integration Sync Logs + description: Retrieves synchronization logs for a Google IDP integration. + operationId: getGoogleIntegrationLogs + responses: + '200': + description: Successfully retrieved the integration sync logs. + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/IdpIntegrationSyncLog' + '400': + description: Bad Request (e.g., invalid integration ID format). 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /api/integrations/azure-idp: + post: + tags: + - IDP Azure Integrations + summary: Create Azure IDP Integration + description: Creates a new Azure AD IDP integration + operationId: createAzureIntegration + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateAzureIntegrationRequest' + responses: + '200': + description: Integration created successfully. Returns the created integration. + content: + application/json: + schema: + $ref: '#/components/schemas/AzureIntegration' + '400': + description: Bad Request (e.g., invalid JSON, missing required fields, validation error). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized (e.g., missing or invalid authentication token). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + get: + tags: + - IDP Azure Integrations + summary: Get All Azure IDP Integrations + description: Retrieves all Azure AD IDP integrations for the authenticated account + operationId: getAllAzureIntegrations + responses: + '200': + description: A list of Azure IDP integrations. + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/AzureIntegration' + '401': + description: Unauthorized. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /api/integrations/azure-idp/{id}: + parameters: + - name: id + in: path + required: true + description: The unique identifier of the Azure IDP integration. + schema: + type: integer + format: int64 + example: 1 + get: + tags: + - IDP Azure Integrations + summary: Get Azure IDP Integration + description: Retrieves an Azure IDP integration by ID. + operationId: getAzureIntegration + responses: + '200': + description: Successfully retrieved the integration details. + content: + application/json: + schema: + $ref: '#/components/schemas/AzureIntegration' + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found (e.g., integration with the given ID does not exist). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + put: + tags: + - IDP Azure Integrations + summary: Update Azure IDP Integration + description: Updates an existing Azure AD IDP integration. + operationId: updateAzureIntegration + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UpdateAzureIntegrationRequest' + responses: + '200': + description: Integration updated successfully. Returns the updated integration. + content: + application/json: + schema: + $ref: '#/components/schemas/AzureIntegration' + '400': + description: Bad Request (e.g., invalid JSON, validation error, invalid ID). 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + delete: + tags: + - IDP Azure Integrations + summary: Delete Azure IDP Integration + description: Deletes an Azure IDP integration by ID. + operationId: deleteAzureIntegration + responses: + '200': + description: Integration deleted successfully. Returns an empty object. + content: + application/json: + schema: + type: object + example: { } + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /api/integrations/azure-idp/{id}/sync: + parameters: + - name: id + in: path + required: true + description: The unique identifier of the Azure IDP integration. + schema: + type: integer + format: int64 + example: 1 + post: + tags: + - IDP Azure Integrations + summary: Sync Azure IDP Integration + description: Triggers a manual synchronization for an Azure IDP integration. + operationId: syncAzureIntegration + responses: + '200': + description: Sync triggered successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/SyncResult' + '400': + description: Bad Request (e.g., invalid integration ID format). 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /api/integrations/azure-idp/{id}/logs: + parameters: + - name: id + in: path + required: true + description: The unique identifier of the Azure IDP integration. + schema: + type: integer + format: int64 + example: 1 + get: + tags: + - IDP Azure Integrations + summary: Get Azure Integration Sync Logs + description: Retrieves synchronization logs for an Azure IDP integration. + operationId: getAzureIntegrationLogs + responses: + '200': + description: Successfully retrieved the integration sync logs. + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/IdpIntegrationSyncLog' + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /api/integrations/okta-scim-idp: + post: + tags: + - IDP Okta SCIM Integrations + summary: Create Okta SCIM IDP Integration + description: Creates a new Okta SCIM IDP integration + operationId: createOktaScimIntegration + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateOktaScimIntegrationRequest' + responses: + '200': + description: Integration created successfully. Returns the created integration. + content: + application/json: + schema: + $ref: '#/components/schemas/OktaScimIntegration' + '400': + description: Bad Request (e.g., invalid JSON, missing required fields, validation error). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized (e.g., missing or invalid authentication token). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + get: + tags: + - IDP Okta SCIM Integrations + summary: Get All Okta SCIM IDP Integrations + description: Retrieves all Okta SCIM IDP integrations for the authenticated account + operationId: getAllOktaScimIntegrations + responses: + '200': + description: A list of Okta SCIM IDP integrations. + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/OktaScimIntegration' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /api/integrations/okta-scim-idp/{id}: + parameters: + - name: id + in: path + required: true + description: The unique identifier of the Okta SCIM IDP integration. 
+ schema: + type: integer + format: int64 + example: 1 + get: + tags: + - IDP Okta SCIM Integrations + summary: Get Okta SCIM IDP Integration + description: Retrieves an Okta SCIM IDP integration by ID. + operationId: getOktaScimIntegration + responses: + '200': + description: Successfully retrieved the integration details. + content: + application/json: + schema: + $ref: '#/components/schemas/OktaScimIntegration' + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found (e.g., integration with the given ID does not exist). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + put: + tags: + - IDP Okta SCIM Integrations + summary: Update Okta SCIM IDP Integration + description: Updates an existing Okta SCIM IDP integration. + operationId: updateOktaScimIntegration + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UpdateOktaScimIntegrationRequest' + responses: + '200': + description: Integration updated successfully. Returns the updated integration. + content: + application/json: + schema: + $ref: '#/components/schemas/OktaScimIntegration' + '400': + description: Bad Request (e.g., invalid JSON, validation error, invalid ID). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + delete: + tags: + - IDP Okta SCIM Integrations + summary: Delete Okta SCIM IDP Integration + description: Deletes an Okta SCIM IDP integration by ID. + operationId: deleteOktaScimIntegration + responses: + '200': + description: Integration deleted successfully. Returns an empty object. + content: + application/json: + schema: + type: object + example: { } + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /api/integrations/okta-scim-idp/{id}/token: + parameters: + - name: id + in: path + required: true + description: The unique identifier of the Okta SCIM IDP integration. + schema: + type: integer + format: int64 + example: 1 + post: + tags: + - IDP Okta SCIM Integrations + summary: Regenerate Okta SCIM Token + description: Regenerates the SCIM API token for an Okta SCIM IDP integration. + operationId: regenerateOktaScimToken + responses: + '200': + description: Token regenerated successfully. Returns the new token. + content: + application/json: + schema: + $ref: '#/components/schemas/ScimTokenResponse' + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /api/integrations/okta-scim-idp/{id}/logs: + parameters: + - name: id + in: path + required: true + description: The unique identifier of the Okta SCIM IDP integration. + schema: + type: integer + format: int64 + example: 1 + get: + tags: + - IDP Okta SCIM Integrations + summary: Get Okta SCIM Integration Sync Logs + description: Retrieves synchronization logs for an Okta SCIM IDP integration. + operationId: getOktaScimIntegrationLogs + responses: + '200': + description: Successfully retrieved the integration sync logs. + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/IdpIntegrationSyncLog' + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' /api/integrations/scim-idp: post: tags: - - IDP + - IDP SCIM Integrations summary: Create SCIM IDP Integration description: Creates a new SCIM integration operationId: createSCIMIntegration @@ -9121,7 +10465,7 @@ paths: $ref: '#/components/schemas/ErrorResponse' get: tags: - - IDP + - IDP SCIM Integrations summary: Get All SCIM IDP Integrations description: Retrieves all SCIM IDP integrations for the authenticated account operationId: getAllSCIMIntegrations @@ -9153,11 +10497,12 @@ paths: required: true description: The unique identifier of the SCIM IDP integration. schema: - type: string - example: "ch8i4ug6lnn4g9hqv7m0" + type: integer + format: int64 + example: 1 get: tags: - - IDP + - IDP SCIM Integrations summary: Get SCIM IDP Integration description: Retrieves an SCIM IDP integration by ID. operationId: getSCIMIntegration @@ -9194,7 +10539,7 @@ paths: $ref: '#/components/schemas/ErrorResponse' put: tags: - - IDP + - IDP SCIM Integrations summary: Update SCIM IDP Integration description: Updates an existing SCIM IDP Integration. operationId: updateSCIMIntegration @@ -9237,7 +10582,7 @@ paths: $ref: '#/components/schemas/ErrorResponse' delete: tags: - - IDP + - IDP SCIM Integrations summary: Delete SCIM IDP Integration description: Deletes an SCIM IDP integration by ID. operationId: deleteSCIMIntegration @@ -9280,11 +10625,12 @@ paths: required: true description: The unique identifier of the SCIM IDP integration. schema: - type: string - example: "ch8i4ug6lnn4g9hqv7m0" + type: integer + format: int64 + example: 1 post: tags: - - IDP + - IDP SCIM Integrations summary: Regenerate SCIM Token description: Regenerates the SCIM API token for an SCIM IDP integration. operationId: regenerateSCIMToken @@ -9326,11 +10672,12 @@ paths: required: true description: The unique identifier of the SCIM IDP integration. 
schema: - type: string - example: "ch8i4ug6lnn4g9hqv7m0" + type: integer + format: int64 + example: 1 get: tags: - - IDP + - IDP SCIM Integrations summary: Get SCIM Integration Sync Logs description: Retrieves synchronization logs for a SCIM IDP integration. operationId: getSCIMIntegrationLogs @@ -9523,6 +10870,161 @@ paths: schema: $ref: '#/components/schemas/ErrorResponse' + /api/integrations/edr/fleetdm: + post: + tags: + - EDR FleetDM Integrations + summary: Create EDR FleetDM Integration + description: Creates a new EDR FleetDM integration + operationId: createFleetDMEDRIntegration + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/EDRFleetDMRequest' + responses: + '200': + description: Integration created successfully. Returns the created integration. + content: + application/json: + schema: + $ref: '#/components/schemas/EDRFleetDMResponse' + '400': + description: Bad Request (e.g., invalid JSON, missing required fields, validation error). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized (e.g., missing or invalid authentication token). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + get: + tags: + - EDR FleetDM Integrations + summary: Get EDR FleetDM Integration + description: Retrieves a specific EDR FleetDM integration by its ID. + responses: + '200': + description: Successfully retrieved the integration details. + content: + application/json: + schema: + $ref: '#/components/schemas/EDRFleetDMResponse' + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found (e.g., integration with the given ID does not exist). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + put: + tags: + - EDR FleetDM Integrations + summary: Update EDR FleetDM Integration + description: Updates an existing EDR FleetDM Integration. + operationId: updateFleetDMEDRIntegration + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/EDRFleetDMRequest' + responses: + '200': + description: Integration updated successfully. Returns the updated integration. + content: + application/json: + schema: + $ref: '#/components/schemas/EDRFleetDMResponse' + '400': + description: Bad Request (e.g., invalid JSON, validation error, invalid ID). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + delete: + tags: + - EDR FleetDM Integrations + summary: Delete EDR FleetDM Integration + description: Deletes an EDR FleetDM Integration by its ID. + responses: + '200': + description: Integration deleted successfully. Returns an empty object. + content: + application/json: + schema: + type: object + example: { } + '400': + description: Bad Request (e.g., invalid integration ID format). + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '401': + description: Unauthorized. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: Not Found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '500': + description: Internal Server Error. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /api/peers/{peer-id}/edr/bypass: parameters: - name: peer-id @@ -10227,3 +11729,172 @@ paths: "$ref": "#/components/responses/not_found" '500': "$ref": "#/components/responses/internal_error" + /api/integrations/notifications/types: + get: + tags: + - Notifications + summary: List Notification Event Types + description: | + Returns a map of all supported activity event type codes to their + human-readable descriptions. Use these codes when configuring + `event_types` on notification channels. + operationId: listNotificationEventTypes + responses: + '200': + description: A map of event type codes to descriptions. + content: + application/json: + schema: + $ref: '#/components/schemas/NotificationTypeEntry' + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '404': + "$ref": "#/components/responses/not_found" + '500': + "$ref": "#/components/responses/internal_error" + /api/integrations/notifications/channels: + get: + tags: + - Notifications + summary: List Notification Channels + description: Retrieves all notification channels configured for the authenticated account. + operationId: listNotificationChannels + responses: + '200': + description: A list of notification channels. 
+ content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/NotificationChannelResponse' + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '404': + "$ref": "#/components/responses/not_found" + '500': + "$ref": "#/components/responses/internal_error" + post: + tags: + - Notifications + summary: Create Notification Channel + description: | + Creates a new notification channel for the authenticated account. + Supported channel types are `email` and `webhook`. + operationId: createNotificationChannel + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/NotificationChannelRequest' + responses: + '200': + description: Notification channel created successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/NotificationChannelResponse' + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '404': + "$ref": "#/components/responses/not_found" + '500': + "$ref": "#/components/responses/internal_error" + /api/integrations/notifications/channels/{channelId}: + parameters: + - name: channelId + in: path + required: true + description: The unique identifier of the notification channel. + schema: + type: string + example: "ch8i4ug6lnn4g9hqv7m0" + get: + tags: + - Notifications + summary: Get Notification Channel + description: Retrieves a specific notification channel by its ID. + operationId: getNotificationChannel + responses: + '200': + description: Successfully retrieved the notification channel. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/NotificationChannelResponse' + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '404': + "$ref": "#/components/responses/not_found" + '500': + "$ref": "#/components/responses/internal_error" + put: + tags: + - Notifications + summary: Update Notification Channel + description: Updates an existing notification channel. + operationId: updateNotificationChannel + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/NotificationChannelRequest' + responses: + '200': + description: Notification channel updated successfully. + content: + application/json: + schema: + $ref: '#/components/schemas/NotificationChannelResponse' + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '404': + "$ref": "#/components/responses/not_found" + '500': + "$ref": "#/components/responses/internal_error" + delete: + tags: + - Notifications + summary: Delete Notification Channel + description: Deletes a notification channel by its ID. + operationId: deleteNotificationChannel + responses: + '200': + description: Notification channel deleted successfully. 
+ content: + application/json: + schema: + type: object + example: { } + '400': + "$ref": "#/components/responses/bad_request" + '401': + "$ref": "#/components/responses/requires_authentication" + '403': + "$ref": "#/components/responses/forbidden" + '404': + "$ref": "#/components/responses/not_found" + '500': + "$ref": "#/components/responses/internal_error" diff --git a/shared/management/http/api/types.gen.go b/shared/management/http/api/types.gen.go index 82fc5b0e4..f545f6303 100644 --- a/shared/management/http/api/types.gen.go +++ b/shared/management/http/api/types.gen.go @@ -9,6 +9,7 @@ import ( "time" "github.com/oapi-codegen/runtime" + openapi_types "github.com/oapi-codegen/runtime/types" ) const ( @@ -16,6 +17,24 @@ const ( TokenAuthScopes = "TokenAuth.Scopes" ) +// Defines values for CreateAzureIntegrationRequestHost. +const ( + CreateAzureIntegrationRequestHostMicrosoftCom CreateAzureIntegrationRequestHost = "microsoft.com" + CreateAzureIntegrationRequestHostMicrosoftUs CreateAzureIntegrationRequestHost = "microsoft.us" +) + +// Valid indicates whether the value is a known member of the CreateAzureIntegrationRequestHost enum. +func (e CreateAzureIntegrationRequestHost) Valid() bool { + switch e { + case CreateAzureIntegrationRequestHostMicrosoftCom: + return true + case CreateAzureIntegrationRequestHostMicrosoftUs: + return true + default: + return false + } +} + // Defines values for CreateIntegrationRequestPlatform. const ( CreateIntegrationRequestPlatformDatadog CreateIntegrationRequestPlatform = "datadog" @@ -664,6 +683,24 @@ func (e NetworkResourceType) Valid() bool { } } +// Defines values for NotificationChannelType. +const ( + NotificationChannelTypeEmail NotificationChannelType = "email" + NotificationChannelTypeWebhook NotificationChannelType = "webhook" +) + +// Valid indicates whether the value is a known member of the NotificationChannelType enum. 
+func (e NotificationChannelType) Valid() bool { + switch e { + case NotificationChannelTypeEmail: + return true + case NotificationChannelTypeWebhook: + return true + default: + return false + } +} + // Defines values for PeerNetworkRangeCheckAction. const ( PeerNetworkRangeCheckActionAllow PeerNetworkRangeCheckAction = "allow" @@ -1450,6 +1487,39 @@ type AvailablePorts struct { Udp int `json:"udp"` } +// AzureIntegration defines model for AzureIntegration. +type AzureIntegration struct { + // ClientId Azure AD application (client) ID + ClientId string `json:"client_id"` + + // ConnectorId DEX connector ID for embedded IDP setups + ConnectorId *string `json:"connector_id,omitempty"` + + // Enabled Whether the integration is enabled + Enabled bool `json:"enabled"` + + // GroupPrefixes List of start_with string patterns for groups to sync + GroupPrefixes []string `json:"group_prefixes"` + + // Host Azure host domain for the Graph API + Host string `json:"host"` + + // Id The unique identifier for the integration + Id int64 `json:"id"` + + // LastSyncedAt Timestamp of the last synchronization + LastSyncedAt time.Time `json:"last_synced_at"` + + // SyncInterval Sync interval in seconds + SyncInterval int `json:"sync_interval"` + + // TenantId Azure AD tenant ID + TenantId string `json:"tenant_id"` + + // UserGroupPrefixes List of start_with string patterns for groups which users to sync + UserGroupPrefixes []string `json:"user_group_prefixes"` +} + // BearerAuthConfig defines model for BearerAuthConfig. type BearerAuthConfig struct { // DistributionGroups List of group IDs that can use bearer auth @@ -1557,6 +1627,57 @@ type Country struct { // CountryCode 2-letter ISO 3166-1 alpha-2 code that represents the country type CountryCode = string +// CreateAzureIntegrationRequest defines model for CreateAzureIntegrationRequest. 
+type CreateAzureIntegrationRequest struct { + // ClientId Azure AD application (client) ID + ClientId string `json:"client_id"` + + // ClientSecret Base64-encoded Azure AD client secret + ClientSecret string `json:"client_secret"` + + // ConnectorId DEX connector ID for embedded IDP setups + ConnectorId *string `json:"connector_id,omitempty"` + + // GroupPrefixes List of start_with string patterns for groups to sync + GroupPrefixes *[]string `json:"group_prefixes,omitempty"` + + // Host Azure host domain for the Graph API + Host CreateAzureIntegrationRequestHost `json:"host"` + + // SyncInterval Sync interval in seconds (minimum 300). Defaults to 300 if not specified. + SyncInterval *int `json:"sync_interval,omitempty"` + + // TenantId Azure AD tenant ID + TenantId string `json:"tenant_id"` + + // UserGroupPrefixes List of start_with string patterns for groups which users to sync + UserGroupPrefixes *[]string `json:"user_group_prefixes,omitempty"` +} + +// CreateAzureIntegrationRequestHost Azure host domain for the Graph API +type CreateAzureIntegrationRequestHost string + +// CreateGoogleIntegrationRequest defines model for CreateGoogleIntegrationRequest. +type CreateGoogleIntegrationRequest struct { + // ConnectorId DEX connector ID for embedded IDP setups + ConnectorId *string `json:"connector_id,omitempty"` + + // CustomerId Customer ID from Google Workspace Account Settings + CustomerId string `json:"customer_id"` + + // GroupPrefixes List of start_with string patterns for groups to sync + GroupPrefixes *[]string `json:"group_prefixes,omitempty"` + + // ServiceAccountKey Base64-encoded Google service account key + ServiceAccountKey string `json:"service_account_key"` + + // SyncInterval Sync interval in seconds (minimum 300). Defaults to 300 if not specified. 
+ SyncInterval *int `json:"sync_interval,omitempty"` + + // UserGroupPrefixes List of start_with string patterns for groups which users to sync + UserGroupPrefixes *[]string `json:"user_group_prefixes,omitempty"` +} + // CreateIntegrationRequest Request payload for creating a new event streaming integration. Also used as the structure for the PUT request body, but not all fields are applicable for updates (see PUT operation description). type CreateIntegrationRequest struct { // Config Platform-specific configuration as key-value pairs. For creation, all necessary credentials and settings must be provided. For updates, provide the fields to change or the entire new configuration. @@ -1572,8 +1693,26 @@ type CreateIntegrationRequest struct { // CreateIntegrationRequestPlatform The event streaming platform to integrate with (e.g., "datadog", "s3", "firehose"). This field is used for creation. For updates (PUT), this field, if sent, is ignored by the backend. type CreateIntegrationRequestPlatform string -// CreateScimIntegrationRequest Request payload for creating an SCIM IDP integration +// CreateOktaScimIntegrationRequest defines model for CreateOktaScimIntegrationRequest. +type CreateOktaScimIntegrationRequest struct { + // ConnectionName The Okta enterprise connection name on Auth0 + ConnectionName string `json:"connection_name"` + + // ConnectorId DEX connector ID for embedded IDP setups + ConnectorId *string `json:"connector_id,omitempty"` + + // GroupPrefixes List of start_with string patterns for groups to sync + GroupPrefixes *[]string `json:"group_prefixes,omitempty"` + + // UserGroupPrefixes List of start_with string patterns for groups which users to sync + UserGroupPrefixes *[]string `json:"user_group_prefixes,omitempty"` +} + +// CreateScimIntegrationRequest defines model for CreateScimIntegrationRequest. 
type CreateScimIntegrationRequest struct { + // ConnectorId DEX connector ID for embedded IDP setups + ConnectorId *string `json:"connector_id,omitempty"` + // GroupPrefixes List of start_with string patterns for groups to sync GroupPrefixes *[]string `json:"group_prefixes,omitempty"` @@ -1725,6 +1864,63 @@ type EDRFalconResponse struct { ZtaScoreThreshold int `json:"zta_score_threshold"` } +// EDRFleetDMRequest Request payload for creating or updating a FleetDM EDR integration +type EDRFleetDMRequest struct { + // ApiToken FleetDM API token + ApiToken string `json:"api_token"` + + // ApiUrl FleetDM server URL + ApiUrl string `json:"api_url"` + + // Enabled Indicates whether the integration is enabled + Enabled *bool `json:"enabled,omitempty"` + + // Groups The Groups this integrations applies to + Groups []string `json:"groups"` + + // LastSyncedInterval The devices last sync requirement interval in hours. Minimum value is 24 hours + LastSyncedInterval int `json:"last_synced_interval"` + + // MatchAttributes Attribute conditions to match when approving FleetDM hosts. Most attributes work with FleetDM's free/open-source version. Premium-only attributes are marked accordingly + MatchAttributes FleetDMMatchAttributes `json:"match_attributes"` +} + +// EDRFleetDMResponse Represents a FleetDM EDR integration configuration +type EDRFleetDMResponse struct { + // AccountId The identifier of the account this integration belongs to. + AccountId string `json:"account_id"` + + // ApiUrl FleetDM server URL + ApiUrl string `json:"api_url"` + + // CreatedAt Timestamp of when the integration was created. + CreatedAt time.Time `json:"created_at"` + + // CreatedBy The user id that created the integration + CreatedBy string `json:"created_by"` + + // Enabled Indicates whether the integration is enabled + Enabled bool `json:"enabled"` + + // Groups List of groups + Groups []Group `json:"groups"` + + // Id The unique numeric identifier for the integration. 
+ Id int64 `json:"id"` + + // LastSyncedAt Timestamp of when the integration was last synced. + LastSyncedAt time.Time `json:"last_synced_at"` + + // LastSyncedInterval The devices last sync requirement interval in hours. + LastSyncedInterval int `json:"last_synced_interval"` + + // MatchAttributes Attribute conditions to match when approving FleetDM hosts. Most attributes work with FleetDM's free/open-source version. Premium-only attributes are marked accordingly + MatchAttributes FleetDMMatchAttributes `json:"match_attributes"` + + // UpdatedAt Timestamp of when the integration was last updated. + UpdatedAt time.Time `json:"updated_at"` +} + // EDRHuntressRequest Request payload for creating or updating a EDR Huntress integration type EDRHuntressRequest struct { // ApiKey Huntress API key @@ -1893,6 +2089,12 @@ type EDRSentinelOneResponse struct { UpdatedAt time.Time `json:"updated_at"` } +// EmailTarget Target configuration for email notification channels. +type EmailTarget struct { + // Emails List of email addresses to send notifications to. + Emails []openapi_types.Email `json:"emails"` +} + // ErrorResponse Standard error response. Note: The exact structure of this error response is inferred from `util.WriteErrorResponse` and `util.WriteError` usage in the provided Go code, as a specific Go struct for errors was not provided. type ErrorResponse struct { // Message A human-readable error message. @@ -1932,6 +2134,24 @@ type Event struct { // EventActivityCode The string code of the activity that occurred during the event type EventActivityCode string +// FleetDMMatchAttributes Attribute conditions to match when approving FleetDM hosts. Most attributes work with FleetDM's free/open-source version. 
Premium-only attributes are marked accordingly +type FleetDMMatchAttributes struct { + // DiskEncryptionEnabled Whether disk encryption (FileVault/BitLocker) must be enabled on the host + DiskEncryptionEnabled *bool `json:"disk_encryption_enabled,omitempty"` + + // FailingPoliciesCountMax Maximum number of allowed failing policies. Use 0 to require all policies to pass + FailingPoliciesCountMax *int `json:"failing_policies_count_max,omitempty"` + + // RequiredPolicies List of FleetDM policy IDs that must be passing on the host. If any of these policies is failing, the host is non-compliant + RequiredPolicies *[]int `json:"required_policies,omitempty"` + + // StatusOnline Whether the host must be online (recently seen by Fleet) + StatusOnline *bool `json:"status_online,omitempty"` + + // VulnerableSoftwareCountMax Maximum number of allowed vulnerable software on the host + VulnerableSoftwareCountMax *int `json:"vulnerable_software_count_max,omitempty"` +} + // GeoLocationCheck Posture check for geo location type GeoLocationCheck struct { // Action Action to take upon policy match @@ -1947,6 +2167,33 @@ type GeoLocationCheckAction string // GetTenantsResponse defines model for GetTenantsResponse. type GetTenantsResponse = []TenantResponse +// GoogleIntegration defines model for GoogleIntegration. 
+type GoogleIntegration struct { + // ConnectorId DEX connector ID for embedded IDP setups + ConnectorId *string `json:"connector_id,omitempty"` + + // CustomerId Customer ID from Google Workspace + CustomerId string `json:"customer_id"` + + // Enabled Whether the integration is enabled + Enabled bool `json:"enabled"` + + // GroupPrefixes List of start_with string patterns for groups to sync + GroupPrefixes []string `json:"group_prefixes"` + + // Id The unique identifier for the integration + Id int64 `json:"id"` + + // LastSyncedAt Timestamp of the last synchronization + LastSyncedAt time.Time `json:"last_synced_at"` + + // SyncInterval Sync interval in seconds + SyncInterval int `json:"sync_interval"` + + // UserGroupPrefixes List of start_with string patterns for groups which users to sync + UserGroupPrefixes []string `json:"user_group_prefixes"` +} + // Group defines model for Group. type Group struct { // Id Group ID @@ -2238,6 +2485,12 @@ type InstanceVersionInfo struct { ManagementUpdateAvailable bool `json:"management_update_available"` } +// IntegrationEnabled defines model for IntegrationEnabled. +type IntegrationEnabled struct { + // Enabled Whether the integration is enabled + Enabled *bool `json:"enabled,omitempty"` +} + // IntegrationResponse Represents an event streaming integration. type IntegrationResponse struct { // AccountId The identifier of the account this integration belongs to. @@ -2265,6 +2518,18 @@ type IntegrationResponse struct { // IntegrationResponsePlatform The event streaming platform. type IntegrationResponsePlatform string +// IntegrationSyncFilters defines model for IntegrationSyncFilters. 
+type IntegrationSyncFilters struct { + // ConnectorId DEX connector ID for embedded IDP setups + ConnectorId *string `json:"connector_id,omitempty"` + + // GroupPrefixes List of start_with string patterns for groups to sync + GroupPrefixes *[]string `json:"group_prefixes,omitempty"` + + // UserGroupPrefixes List of start_with string patterns for groups which users to sync + UserGroupPrefixes *[]string `json:"user_group_prefixes,omitempty"` +} + // InvoicePDFResponse defines model for InvoicePDFResponse. type InvoicePDFResponse struct { // Url URL to redirect the user to invoice. @@ -2666,6 +2931,67 @@ type NetworkTrafficUser struct { Name string `json:"name"` } +// NotificationChannelRequest Request body for creating or updating a notification channel. +type NotificationChannelRequest struct { + // Enabled Whether this notification channel is active. + Enabled bool `json:"enabled"` + + // EventTypes List of activity event type codes this channel subscribes to. + EventTypes []NotificationEventType `json:"event_types"` + + // Target Channel-specific target configuration. The shape depends on the `type` field: + // - `email`: requires an `EmailTarget` object + // - `webhook`: requires a `WebhookTarget` object + Target *NotificationChannelRequest_Target `json:"target,omitempty"` + + // Type The type of notification channel. + Type NotificationChannelType `json:"type"` +} + +// NotificationChannelRequest_Target Channel-specific target configuration. The shape depends on the `type` field: +// - `email`: requires an `EmailTarget` object +// - `webhook`: requires a `WebhookTarget` object +type NotificationChannelRequest_Target struct { + union json.RawMessage +} + +// NotificationChannelResponse A notification channel configuration. +type NotificationChannelResponse struct { + // Enabled Whether this notification channel is active. + Enabled bool `json:"enabled"` + + // EventTypes List of activity event type codes this channel subscribes to. 
+ EventTypes []NotificationEventType `json:"event_types"` + + // Id Unique identifier of the notification channel. + Id *string `json:"id,omitempty"` + + // Target Channel-specific target configuration. The shape depends on the `type` field: + // - `email`: an `EmailTarget` object + // - `webhook`: a `WebhookTarget` object + Target *NotificationChannelResponse_Target `json:"target,omitempty"` + + // Type The type of notification channel. + Type NotificationChannelType `json:"type"` +} + +// NotificationChannelResponse_Target Channel-specific target configuration. The shape depends on the `type` field: +// - `email`: an `EmailTarget` object +// - `webhook`: a `WebhookTarget` object +type NotificationChannelResponse_Target struct { + union json.RawMessage +} + +// NotificationChannelType The type of notification channel. +type NotificationChannelType string + +// NotificationEventType An activity event type code. See `GET /api/integrations/notifications/types` for the full list +// of supported event types and their human-readable descriptions. +type NotificationEventType = string + +// NotificationTypeEntry A map of event type codes to their human-readable descriptions. +type NotificationTypeEntry map[string]string + // OSVersionCheck Posture check for the version of operating system type OSVersionCheck struct { // Android Posture check for the version of operating system @@ -2684,6 +3010,30 @@ type OSVersionCheck struct { Windows *MinKernelVersionCheck `json:"windows,omitempty"` } +// OktaScimIntegration defines model for OktaScimIntegration. 
+type OktaScimIntegration struct { + // AuthToken SCIM API token (full on creation/regeneration, masked on retrieval) + AuthToken string `json:"auth_token"` + + // ConnectorId DEX connector ID for embedded IDP setups + ConnectorId *string `json:"connector_id,omitempty"` + + // Enabled Whether the integration is enabled + Enabled bool `json:"enabled"` + + // GroupPrefixes List of start_with string patterns for groups to sync + GroupPrefixes []string `json:"group_prefixes"` + + // Id The unique identifier for the integration + Id int64 `json:"id"` + + // LastSyncedAt Timestamp of the last synchronization + LastSyncedAt time.Time `json:"last_synced_at"` + + // UserGroupPrefixes List of start_with string patterns for groups which users to sync + UserGroupPrefixes []string `json:"user_group_prefixes"` +} + // PINAuthConfig defines model for PINAuthConfig. type PINAuthConfig struct { // Enabled Whether PIN auth is enabled @@ -3571,12 +3921,15 @@ type RulePortRange struct { Start int `json:"start"` } -// ScimIntegration Represents a SCIM IDP integration +// ScimIntegration defines model for ScimIntegration. 
type ScimIntegration struct { // AuthToken SCIM API token (full on creation, masked otherwise) AuthToken string `json:"auth_token"` - // Enabled Indicates whether the integration is enabled + // ConnectorId DEX connector ID for embedded IDP setups + ConnectorId *string `json:"connector_id,omitempty"` + + // Enabled Whether the integration is enabled Enabled bool `json:"enabled"` // GroupPrefixes List of start_with string patterns for groups to sync @@ -3588,6 +3941,9 @@ type ScimIntegration struct { // LastSyncedAt Timestamp of when the integration was last synced LastSyncedAt time.Time `json:"last_synced_at"` + // Prefix The connection prefix used for the SCIM provider + Prefix string `json:"prefix"` + // Provider Name of the SCIM identity provider Provider string `json:"provider"` @@ -3670,6 +4026,9 @@ type Service struct { // Targets List of target backends for this service Targets []ServiceTarget `json:"targets"` + + // Terminated Whether the service has been terminated. Terminated services cannot be updated. Services that violate the Terms of Service will be terminated. + Terminated *bool `json:"terminated,omitempty"` } // ServiceMode Service mode. "http" for L7 reverse proxy, "tcp"/"udp"/"tls" for L4 passthrough. @@ -3989,6 +4348,11 @@ type Subscription struct { UpdatedAt time.Time `json:"updated_at"` } +// SyncResult Response for a manual sync trigger +type SyncResult struct { + Result *string `json:"result,omitempty"` +} + // TenantGroupResponse defines model for TenantGroupResponse. type TenantGroupResponse struct { // Id The Group ID @@ -4034,14 +4398,86 @@ type TenantResponse struct { // TenantResponseStatus The status of the tenant type TenantResponseStatus string -// UpdateScimIntegrationRequest Request payload for updating an SCIM IDP integration -type UpdateScimIntegrationRequest struct { - // Enabled Indicates whether the integration is enabled +// UpdateAzureIntegrationRequest defines model for UpdateAzureIntegrationRequest. 
+type UpdateAzureIntegrationRequest struct { + // ClientId Azure AD application (client) ID + ClientId *string `json:"client_id,omitempty"` + + // ClientSecret Base64-encoded Azure AD client secret + ClientSecret *string `json:"client_secret,omitempty"` + + // ConnectorId DEX connector ID for embedded IDP setups + ConnectorId *string `json:"connector_id,omitempty"` + + // Enabled Whether the integration is enabled Enabled *bool `json:"enabled,omitempty"` // GroupPrefixes List of start_with string patterns for groups to sync GroupPrefixes *[]string `json:"group_prefixes,omitempty"` + // SyncInterval Sync interval in seconds (minimum 300) + SyncInterval *int `json:"sync_interval,omitempty"` + + // TenantId Azure AD tenant ID + TenantId *string `json:"tenant_id,omitempty"` + + // UserGroupPrefixes List of start_with string patterns for groups which users to sync + UserGroupPrefixes *[]string `json:"user_group_prefixes,omitempty"` +} + +// UpdateGoogleIntegrationRequest defines model for UpdateGoogleIntegrationRequest. 
+type UpdateGoogleIntegrationRequest struct { + // ConnectorId DEX connector ID for embedded IDP setups + ConnectorId *string `json:"connector_id,omitempty"` + + // CustomerId Customer ID from Google Workspace Account Settings + CustomerId *string `json:"customer_id,omitempty"` + + // Enabled Whether the integration is enabled + Enabled *bool `json:"enabled,omitempty"` + + // GroupPrefixes List of start_with string patterns for groups to sync + GroupPrefixes *[]string `json:"group_prefixes,omitempty"` + + // ServiceAccountKey Base64-encoded Google service account key + ServiceAccountKey *string `json:"service_account_key,omitempty"` + + // SyncInterval Sync interval in seconds (minimum 300) + SyncInterval *int `json:"sync_interval,omitempty"` + + // UserGroupPrefixes List of start_with string patterns for groups which users to sync + UserGroupPrefixes *[]string `json:"user_group_prefixes,omitempty"` +} + +// UpdateOktaScimIntegrationRequest defines model for UpdateOktaScimIntegrationRequest. +type UpdateOktaScimIntegrationRequest struct { + // ConnectorId DEX connector ID for embedded IDP setups + ConnectorId *string `json:"connector_id,omitempty"` + + // Enabled Whether the integration is enabled + Enabled *bool `json:"enabled,omitempty"` + + // GroupPrefixes List of start_with string patterns for groups to sync + GroupPrefixes *[]string `json:"group_prefixes,omitempty"` + + // UserGroupPrefixes List of start_with string patterns for groups which users to sync + UserGroupPrefixes *[]string `json:"user_group_prefixes,omitempty"` +} + +// UpdateScimIntegrationRequest defines model for UpdateScimIntegrationRequest. 
+type UpdateScimIntegrationRequest struct { + // ConnectorId DEX connector ID for embedded IDP setups + ConnectorId *string `json:"connector_id,omitempty"` + + // Enabled Whether the integration is enabled + Enabled *bool `json:"enabled,omitempty"` + + // GroupPrefixes List of start_with string patterns for groups to sync + GroupPrefixes *[]string `json:"group_prefixes,omitempty"` + + // Prefix The connection prefix used for the SCIM provider + Prefix *string `json:"prefix,omitempty"` + // UserGroupPrefixes List of start_with string patterns for groups which users to sync UserGroupPrefixes *[]string `json:"user_group_prefixes,omitempty"` } @@ -4249,6 +4685,16 @@ type UserRequest struct { Role string `json:"role"` } +// WebhookTarget Target configuration for webhook notification channels. +type WebhookTarget struct { + // Headers Custom HTTP headers sent with each webhook request. + // Values are write-only; in GET responses all values are masked. + Headers *map[string]string `json:"headers,omitempty"` + + // Url The webhook endpoint URL to send notifications to. + Url string `json:"url"` +} + // WorkloadRequest defines model for WorkloadRequest. type WorkloadRequest struct { union json.RawMessage @@ -4551,6 +4997,12 @@ type PostApiIngressPeersJSONRequestBody = IngressPeerCreateRequest // PutApiIngressPeersIngressPeerIdJSONRequestBody defines body for PutApiIngressPeersIngressPeerId for application/json ContentType. type PutApiIngressPeersIngressPeerIdJSONRequestBody = IngressPeerUpdateRequest +// CreateAzureIntegrationJSONRequestBody defines body for CreateAzureIntegration for application/json ContentType. +type CreateAzureIntegrationJSONRequestBody = CreateAzureIntegrationRequest + +// UpdateAzureIntegrationJSONRequestBody defines body for UpdateAzureIntegration for application/json ContentType. 
+type UpdateAzureIntegrationJSONRequestBody = UpdateAzureIntegrationRequest + // PostApiIntegrationsBillingAwsMarketplaceActivateJSONRequestBody defines body for PostApiIntegrationsBillingAwsMarketplaceActivate for application/json ContentType. type PostApiIntegrationsBillingAwsMarketplaceActivateJSONRequestBody PostApiIntegrationsBillingAwsMarketplaceActivateJSONBody @@ -4569,6 +5021,12 @@ type CreateFalconEDRIntegrationJSONRequestBody = EDRFalconRequest // UpdateFalconEDRIntegrationJSONRequestBody defines body for UpdateFalconEDRIntegration for application/json ContentType. type UpdateFalconEDRIntegrationJSONRequestBody = EDRFalconRequest +// CreateFleetDMEDRIntegrationJSONRequestBody defines body for CreateFleetDMEDRIntegration for application/json ContentType. +type CreateFleetDMEDRIntegrationJSONRequestBody = EDRFleetDMRequest + +// UpdateFleetDMEDRIntegrationJSONRequestBody defines body for UpdateFleetDMEDRIntegration for application/json ContentType. +type UpdateFleetDMEDRIntegrationJSONRequestBody = EDRFleetDMRequest + // CreateHuntressEDRIntegrationJSONRequestBody defines body for CreateHuntressEDRIntegration for application/json ContentType. type CreateHuntressEDRIntegrationJSONRequestBody = EDRHuntressRequest @@ -4587,6 +5045,12 @@ type CreateSentinelOneEDRIntegrationJSONRequestBody = EDRSentinelOneRequest // UpdateSentinelOneEDRIntegrationJSONRequestBody defines body for UpdateSentinelOneEDRIntegration for application/json ContentType. type UpdateSentinelOneEDRIntegrationJSONRequestBody = EDRSentinelOneRequest +// CreateGoogleIntegrationJSONRequestBody defines body for CreateGoogleIntegration for application/json ContentType. +type CreateGoogleIntegrationJSONRequestBody = CreateGoogleIntegrationRequest + +// UpdateGoogleIntegrationJSONRequestBody defines body for UpdateGoogleIntegration for application/json ContentType. 
+type UpdateGoogleIntegrationJSONRequestBody = UpdateGoogleIntegrationRequest + // PostApiIntegrationsMspTenantsJSONRequestBody defines body for PostApiIntegrationsMspTenants for application/json ContentType. type PostApiIntegrationsMspTenantsJSONRequestBody = CreateTenantRequest @@ -4602,6 +5066,18 @@ type PostApiIntegrationsMspTenantsIdSubscriptionJSONRequestBody PostApiIntegrati // PostApiIntegrationsMspTenantsIdUnlinkJSONRequestBody defines body for PostApiIntegrationsMspTenantsIdUnlink for application/json ContentType. type PostApiIntegrationsMspTenantsIdUnlinkJSONRequestBody PostApiIntegrationsMspTenantsIdUnlinkJSONBody +// CreateNotificationChannelJSONRequestBody defines body for CreateNotificationChannel for application/json ContentType. +type CreateNotificationChannelJSONRequestBody = NotificationChannelRequest + +// UpdateNotificationChannelJSONRequestBody defines body for UpdateNotificationChannel for application/json ContentType. +type UpdateNotificationChannelJSONRequestBody = NotificationChannelRequest + +// CreateOktaScimIntegrationJSONRequestBody defines body for CreateOktaScimIntegration for application/json ContentType. +type CreateOktaScimIntegrationJSONRequestBody = CreateOktaScimIntegrationRequest + +// UpdateOktaScimIntegrationJSONRequestBody defines body for UpdateOktaScimIntegration for application/json ContentType. +type UpdateOktaScimIntegrationJSONRequestBody = UpdateOktaScimIntegrationRequest + // CreateSCIMIntegrationJSONRequestBody defines body for CreateSCIMIntegration for application/json ContentType. type CreateSCIMIntegrationJSONRequestBody = CreateScimIntegrationRequest @@ -4701,6 +5177,130 @@ type PutApiUsersUserIdPasswordJSONRequestBody = PasswordChangeRequest // PostApiUsersUserIdTokensJSONRequestBody defines body for PostApiUsersUserIdTokens for application/json ContentType. 
type PostApiUsersUserIdTokensJSONRequestBody = PersonalAccessTokenRequest +// AsEmailTarget returns the union data inside the NotificationChannelRequest_Target as a EmailTarget +func (t NotificationChannelRequest_Target) AsEmailTarget() (EmailTarget, error) { + var body EmailTarget + err := json.Unmarshal(t.union, &body) + return body, err +} + +// FromEmailTarget overwrites any union data inside the NotificationChannelRequest_Target as the provided EmailTarget +func (t *NotificationChannelRequest_Target) FromEmailTarget(v EmailTarget) error { + b, err := json.Marshal(v) + t.union = b + return err +} + +// MergeEmailTarget performs a merge with any union data inside the NotificationChannelRequest_Target, using the provided EmailTarget +func (t *NotificationChannelRequest_Target) MergeEmailTarget(v EmailTarget) error { + b, err := json.Marshal(v) + if err != nil { + return err + } + + merged, err := runtime.JSONMerge(t.union, b) + t.union = merged + return err +} + +// AsWebhookTarget returns the union data inside the NotificationChannelRequest_Target as a WebhookTarget +func (t NotificationChannelRequest_Target) AsWebhookTarget() (WebhookTarget, error) { + var body WebhookTarget + err := json.Unmarshal(t.union, &body) + return body, err +} + +// FromWebhookTarget overwrites any union data inside the NotificationChannelRequest_Target as the provided WebhookTarget +func (t *NotificationChannelRequest_Target) FromWebhookTarget(v WebhookTarget) error { + b, err := json.Marshal(v) + t.union = b + return err +} + +// MergeWebhookTarget performs a merge with any union data inside the NotificationChannelRequest_Target, using the provided WebhookTarget +func (t *NotificationChannelRequest_Target) MergeWebhookTarget(v WebhookTarget) error { + b, err := json.Marshal(v) + if err != nil { + return err + } + + merged, err := runtime.JSONMerge(t.union, b) + t.union = merged + return err +} + +func (t NotificationChannelRequest_Target) MarshalJSON() ([]byte, error) { + b, err := 
t.union.MarshalJSON() + return b, err +} + +func (t *NotificationChannelRequest_Target) UnmarshalJSON(b []byte) error { + err := t.union.UnmarshalJSON(b) + return err +} + +// AsEmailTarget returns the union data inside the NotificationChannelResponse_Target as a EmailTarget +func (t NotificationChannelResponse_Target) AsEmailTarget() (EmailTarget, error) { + var body EmailTarget + err := json.Unmarshal(t.union, &body) + return body, err +} + +// FromEmailTarget overwrites any union data inside the NotificationChannelResponse_Target as the provided EmailTarget +func (t *NotificationChannelResponse_Target) FromEmailTarget(v EmailTarget) error { + b, err := json.Marshal(v) + t.union = b + return err +} + +// MergeEmailTarget performs a merge with any union data inside the NotificationChannelResponse_Target, using the provided EmailTarget +func (t *NotificationChannelResponse_Target) MergeEmailTarget(v EmailTarget) error { + b, err := json.Marshal(v) + if err != nil { + return err + } + + merged, err := runtime.JSONMerge(t.union, b) + t.union = merged + return err +} + +// AsWebhookTarget returns the union data inside the NotificationChannelResponse_Target as a WebhookTarget +func (t NotificationChannelResponse_Target) AsWebhookTarget() (WebhookTarget, error) { + var body WebhookTarget + err := json.Unmarshal(t.union, &body) + return body, err +} + +// FromWebhookTarget overwrites any union data inside the NotificationChannelResponse_Target as the provided WebhookTarget +func (t *NotificationChannelResponse_Target) FromWebhookTarget(v WebhookTarget) error { + b, err := json.Marshal(v) + t.union = b + return err +} + +// MergeWebhookTarget performs a merge with any union data inside the NotificationChannelResponse_Target, using the provided WebhookTarget +func (t *NotificationChannelResponse_Target) MergeWebhookTarget(v WebhookTarget) error { + b, err := json.Marshal(v) + if err != nil { + return err + } + + merged, err := runtime.JSONMerge(t.union, b) + t.union = 
merged + return err +} + +func (t NotificationChannelResponse_Target) MarshalJSON() ([]byte, error) { + b, err := t.union.MarshalJSON() + return b, err +} + +func (t *NotificationChannelResponse_Target) UnmarshalJSON(b []byte) error { + err := t.union.UnmarshalJSON(b) + return err +} + // AsBundleWorkloadRequest returns the union data inside the WorkloadRequest as a BundleWorkloadRequest func (t WorkloadRequest) AsBundleWorkloadRequest() (BundleWorkloadRequest, error) { var body BundleWorkloadRequest diff --git a/shared/management/proto/management.pb.go b/shared/management/proto/management.pb.go index c5581296c..604f9c793 100644 --- a/shared/management/proto/management.pb.go +++ b/shared/management/proto/management.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v6.33.0 +// protoc v7.34.1 // source: management.proto package proto @@ -2259,8 +2259,8 @@ type AutoUpdateSettings struct { unknownFields protoimpl.UnknownFields Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` - // alwaysUpdate = true → Updates happen automatically in the background - // alwaysUpdate = false → Updates only happen when triggered by a peer connection + // alwaysUpdate = true → Updates are installed automatically in the background + // alwaysUpdate = false → Updates require user interaction from the UI AlwaysUpdate bool `protobuf:"varint,2,opt,name=alwaysUpdate,proto3" json:"alwaysUpdate,omitempty"` } @@ -2928,7 +2928,9 @@ type ProviderConfig struct { // An IDP application client id ClientID string `protobuf:"bytes,1,opt,name=ClientID,proto3" json:"ClientID,omitempty"` - // An IDP application client secret + // Deprecated: use embedded IdP for providers that require a client secret (e.g. Google Workspace). + // + // Deprecated: Do not use. ClientSecret string `protobuf:"bytes,2,opt,name=ClientSecret,proto3" json:"ClientSecret,omitempty"` // An IDP API domain // Deprecated. 
Use a DeviceAuthEndpoint and TokenEndpoint @@ -2992,6 +2994,7 @@ func (x *ProviderConfig) GetClientID() string { return "" } +// Deprecated: Do not use. func (x *ProviderConfig) GetClientSecret() string { if x != nil { return x.ClientSecret @@ -4847,287 +4850,287 @@ var file_management_proto_rawDesc = []byte{ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x22, 0xb8, 0x03, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, + 0x69, 0x67, 0x22, 0xbc, 0x03, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1a, 0x0a, 0x08, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, - 0x44, 0x12, 0x22, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, - 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, - 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x1a, 0x0a, - 0x08, 0x41, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x41, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x2e, 0x0a, 0x12, 0x44, 0x65, 0x76, - 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, - 0x68, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0d, 0x54, 0x6f, 0x6b, - 0x65, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0d, 0x54, 0x6f, 0x6b, 
0x65, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, - 0x14, 0x0a, 0x05, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x53, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x55, 0x73, 0x65, 0x49, 0x44, 0x54, 0x6f, - 0x6b, 0x65, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x55, 0x73, 0x65, 0x49, 0x44, - 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x34, 0x0a, 0x15, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, - 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x09, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x52, - 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x52, 0x4c, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x0c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x52, 0x4c, 0x73, 0x12, - 0x2e, 0x0a, 0x12, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x6d, 0x70, 0x74, - 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x44, 0x69, 0x73, - 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, - 0x1c, 0x0a, 0x09, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x46, 0x6c, 0x61, 0x67, 0x18, 0x0c, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x09, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x46, 0x6c, 0x61, 0x67, 0x22, 0x93, 0x02, - 0x0a, 0x05, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x4e, 0x65, 0x74, 0x77, 0x6f, - 0x72, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x54, 0x79, 0x70, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x65, 0x65, 0x72, 
0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x50, 0x65, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, - 0x1e, 0x0a, 0x0a, 0x4d, 0x61, 0x73, 0x71, 0x75, 0x65, 0x72, 0x61, 0x64, 0x65, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0a, 0x4d, 0x61, 0x73, 0x71, 0x75, 0x65, 0x72, 0x61, 0x64, 0x65, 0x12, - 0x14, 0x0a, 0x05, 0x4e, 0x65, 0x74, 0x49, 0x44, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x4e, 0x65, 0x74, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, - 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, - 0x1c, 0x0a, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x24, 0x0a, - 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x41, 0x75, 0x74, 0x6f, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x18, 0x0a, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x41, 0x75, 0x74, 0x6f, 0x41, 0x70, - 0x70, 0x6c, 0x79, 0x22, 0xde, 0x01, 0x0a, 0x09, 0x44, 0x4e, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x24, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x45, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x47, 0x0a, 0x10, 0x4e, 0x61, 0x6d, 0x65, 0x53, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x1b, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4e, - 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x10, - 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, - 0x12, 0x38, 0x0a, 0x0b, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x18, - 0x03, 
0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, 0x65, 0x52, 0x0b, 0x43, - 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0d, 0x46, 0x6f, - 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x03, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0d, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, - 0x50, 0x6f, 0x72, 0x74, 0x22, 0xb8, 0x01, 0x0a, 0x0a, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, - 0x6f, 0x6e, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x32, 0x0a, 0x07, 0x52, - 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, - 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x07, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, - 0x32, 0x0a, 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x44, - 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x53, - 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x44, 0x69, 0x73, 0x61, 0x62, - 0x6c, 0x65, 0x64, 0x12, 0x2a, 0x0a, 0x10, 0x4e, 0x6f, 0x6e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, - 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x4e, - 0x6f, 0x6e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, 0x22, - 0x74, 0x0a, 0x0c, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, - 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 
0x12, 0x14, 0x0a, 0x05, 0x43, 0x6c, 0x61, 0x73, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x10, 0x0a, - 0x03, 0x54, 0x54, 0x4c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x54, 0x54, 0x4c, 0x12, - 0x14, 0x0a, 0x05, 0x52, 0x44, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x52, 0x44, 0x61, 0x74, 0x61, 0x22, 0xb3, 0x01, 0x0a, 0x0f, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x38, 0x0a, 0x0b, 0x4e, 0x61, 0x6d, - 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, - 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4e, 0x61, 0x6d, 0x65, - 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x18, 0x0a, - 0x07, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, - 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, - 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, - 0x61, 0x69, 0x6e, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x48, 0x0a, 0x0a, 0x4e, - 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x50, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x50, 0x12, 0x16, 0x0a, 0x06, 0x4e, 0x53, 0x54, - 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x4e, 0x53, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x04, 0x50, 0x6f, 0x72, 0x74, 0x22, 0xa7, 0x02, 0x0a, 0x0c, 0x46, 0x69, 0x72, 
0x65, 0x77, 0x61, - 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x50, 0x65, 0x65, 0x72, 0x49, 0x50, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x65, 0x65, 0x72, 0x49, 0x50, 0x12, 0x37, - 0x0a, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x19, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, - 0x75, 0x6c, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x44, 0x69, - 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x12, 0x0a, - 0x04, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x50, 0x6f, 0x72, - 0x74, 0x12, 0x30, 0x0a, 0x08, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x50, 0x6f, 0x72, 0x74, 0x49, - 0x6e, 0x66, 0x6f, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x44, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x44, 0x22, - 0x38, 0x0a, 0x0e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, - 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x65, 0x74, 0x49, 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x6e, 
0x65, 0x74, 0x49, 0x50, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x61, 0x63, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6d, 0x61, 0x63, 0x22, 0x1e, 0x0a, 0x06, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x05, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x22, 0x96, 0x01, 0x0a, 0x08, 0x50, 0x6f, - 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x14, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x32, 0x0a, 0x05, - 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, - 0x6f, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, - 0x1a, 0x2f, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, - 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x65, 0x6e, - 0x64, 0x42, 0x0f, 0x0a, 0x0d, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x22, 0x87, 0x03, 0x0a, 0x11, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x46, 0x69, 0x72, 0x65, - 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x2e, 0x0a, 0x06, - 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x41, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, - 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 
0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x34, - 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, - 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x30, 0x0a, 0x08, 0x70, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x70, 0x6f, - 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1c, 0x0a, 0x09, 0x69, 0x73, 0x44, 0x79, 0x6e, 0x61, - 0x6d, 0x69, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x44, 0x79, 0x6e, - 0x61, 0x6d, 0x69, 0x63, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, - 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x26, - 0x0a, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x49, 0x44, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x49, 0x44, 0x18, 0x0a, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x49, 0x44, 0x22, 0xf2, 0x01, 0x0a, - 0x0e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x12, - 0x34, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 
0x52, - 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x3e, 0x0a, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, - 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, - 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x2c, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, - 0x74, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x11, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, - 0x65, 0x73, 0x73, 0x12, 0x3c, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, - 0x64, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, - 0x6f, 0x52, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6f, 0x72, - 0x74, 0x22, 0x8b, 0x02, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, - 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x36, - 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, - 0x70, 0x6f, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x6e, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x70, 0x69, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, - 0x77, 0x6f, 0x72, 0x64, 0x18, 
0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, - 0x77, 0x6f, 0x72, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x67, 0x72, 0x6f, - 0x75, 0x70, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x47, - 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x1f, 0x0a, - 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1f, - 0x0a, 0x0b, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x22, - 0xa1, 0x01, 0x0a, 0x15, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x16, 0x0a, - 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, - 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x61, 0x75, - 0x74, 0x6f, 0x5f, 0x61, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x10, 0x70, 0x6f, 0x72, 0x74, 0x41, 0x75, 0x74, 0x6f, 0x41, 0x73, 0x73, 0x69, 0x67, - 0x6e, 0x65, 0x64, 0x22, 0x2c, 0x0a, 0x12, 0x52, 0x65, 0x6e, 0x65, 0x77, 0x45, 0x78, 0x70, 0x6f, - 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 
0x0a, 0x06, 0x64, 0x6f, 0x6d, - 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, - 0x6e, 0x22, 0x15, 0x0a, 0x13, 0x52, 0x65, 0x6e, 0x65, 0x77, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0x0a, 0x11, 0x53, 0x74, 0x6f, 0x70, + 0x44, 0x12, 0x26, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0c, 0x43, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x44, 0x6f, 0x6d, + 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, + 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x41, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x41, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x2e, 0x0a, + 0x12, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x44, 0x65, 0x76, 0x69, 0x63, + 0x65, 0x41, 0x75, 0x74, 0x68, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x24, 0x0a, + 0x0d, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x55, 0x73, 0x65, + 0x49, 0x44, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x55, + 0x73, 0x65, 0x49, 0x44, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x34, 0x0a, 0x15, 0x41, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 
0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, + 0x22, 0x0a, 0x0c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x52, 0x4c, 0x73, 0x18, + 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, + 0x52, 0x4c, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, + 0x6f, 0x6d, 0x70, 0x74, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x12, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x4c, 0x6f, + 0x67, 0x69, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x46, 0x6c, 0x61, 0x67, + 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x46, 0x6c, 0x61, + 0x67, 0x22, 0x93, 0x02, 0x0a, 0x05, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x49, + 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x4e, + 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x4e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, + 0x54, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x4e, 0x65, 0x74, 0x77, + 0x6f, 0x72, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x65, 0x65, 0x72, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x50, 0x65, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x4d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x12, 0x1e, 0x0a, 0x0a, 0x4d, 0x61, 0x73, 0x71, 0x75, 0x65, 0x72, 0x61, 0x64, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x4d, 0x61, 0x73, 0x71, 0x75, 0x65, 0x72, + 0x61, 0x64, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x4e, 0x65, 0x74, 0x49, 0x44, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x4e, 0x65, 0x74, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x44, 0x6f, 0x6d, + 0x61, 0x69, 0x6e, 0x73, 0x18, 0x08, 0x20, 
0x03, 0x28, 0x09, 0x52, 0x07, 0x44, 0x6f, 0x6d, 0x61, + 0x69, 0x6e, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x6f, 0x75, 0x74, 0x65, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x41, 0x75, 0x74, 0x6f, 0x41, 0x70, 0x70, + 0x6c, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x41, 0x75, + 0x74, 0x6f, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x22, 0xde, 0x01, 0x0a, 0x09, 0x44, 0x4e, 0x53, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x24, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x47, 0x0a, 0x10, 0x4e, + 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, + 0x75, 0x70, 0x52, 0x10, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, + 0x6f, 0x75, 0x70, 0x73, 0x12, 0x38, 0x0a, 0x0b, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, + 0x6e, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, + 0x65, 0x52, 0x0b, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, 0x65, 0x73, 0x12, 0x28, + 0x0a, 0x0d, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0d, 0x46, 0x6f, 0x72, 0x77, 0x61, + 0x72, 0x64, 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, 0x22, 0xb8, 0x01, 0x0a, 0x0a, 0x43, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x44, 0x6f, 
0x6d, 0x61, 0x69, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, + 0x32, 0x0a, 0x07, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x69, + 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x07, 0x52, 0x65, 0x63, 0x6f, + 0x72, 0x64, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, + 0x61, 0x69, 0x6e, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x44, + 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x2a, 0x0a, 0x10, 0x4e, 0x6f, 0x6e, 0x41, 0x75, + 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x10, 0x4e, 0x6f, 0x6e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61, 0x74, + 0x69, 0x76, 0x65, 0x22, 0x74, 0x0a, 0x0c, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x63, + 0x6f, 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x43, + 0x6c, 0x61, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x43, 0x6c, 0x61, 0x73, + 0x73, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, + 0x54, 0x54, 0x4c, 0x12, 0x14, 0x0a, 0x05, 0x52, 0x44, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x52, 0x44, 0x61, 0x74, 0x61, 0x22, 0xb3, 0x01, 0x0a, 0x0f, 0x4e, 0x61, + 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x38, 0x0a, + 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 
0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x0b, 0x4e, 0x61, 0x6d, 0x65, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x50, 0x72, 0x69, 0x6d, 0x61, + 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, + 0x79, 0x12, 0x18, 0x0a, 0x07, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x07, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x53, + 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x45, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x53, 0x65, 0x61, 0x72, 0x63, + 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, + 0x48, 0x0a, 0x0a, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x0e, 0x0a, + 0x02, 0x49, 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x50, 0x12, 0x16, 0x0a, + 0x06, 0x4e, 0x53, 0x54, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x4e, + 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x22, 0xa7, 0x02, 0x0a, 0x0c, 0x46, 0x69, + 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x50, 0x65, + 0x65, 0x72, 0x49, 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x65, 0x65, 0x72, + 0x49, 0x50, 0x12, 0x37, 0x0a, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x06, 0x41, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 
0x28, 0x0e, 0x32, 0x16, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x41, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x08, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x30, 0x0a, 0x08, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, + 0x6f, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x50, + 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x49, 0x44, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x49, 0x44, 0x22, 0x38, 0x0a, 0x0e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x65, 0x74, 0x49, 0x50, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x65, 0x74, 0x49, 0x50, 0x12, 0x10, 0x0a, 0x03, 0x6d, + 0x61, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6d, 0x61, 0x63, 0x22, 0x1e, 0x0a, + 0x06, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x46, 0x69, 0x6c, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x22, 0x96, 0x01, + 0x0a, 0x08, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x14, 0x0a, 0x04, 0x70, 0x6f, + 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, + 0x12, 0x32, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 
0x32, + 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, + 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x05, 0x72, + 0x61, 0x6e, 0x67, 0x65, 0x1a, 0x2f, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x03, 0x65, 0x6e, 0x64, 0x42, 0x0f, 0x0a, 0x0d, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x65, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x87, 0x03, 0x0a, 0x11, 0x52, 0x6f, 0x75, 0x74, 0x65, + 0x46, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x22, 0x0a, 0x0c, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, + 0x12, 0x2e, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x75, + 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x30, 0x0a, 0x08, 0x70, 0x6f, 0x72, 0x74, + 0x49, 0x6e, 0x66, 0x6f, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x6d, 0x65, 
0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, + 0x52, 0x08, 0x70, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1c, 0x0a, 0x09, 0x69, 0x73, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, + 0x73, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x6f, 0x6d, 0x61, + 0x69, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, + 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x63, 0x75, 0x73, 0x74, + 0x6f, 0x6d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x49, 0x44, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x49, + 0x44, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x49, 0x44, + 0x22, 0xf2, 0x01, 0x0a, 0x0e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, + 0x75, 0x6c, 0x65, 0x12, 0x34, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, + 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x3e, 0x0a, 0x0f, 0x64, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x2c, 0x0a, 0x11, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 
0x65, 0x73, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x3c, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, + 0x6c, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x6f, 0x72, + 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, + 0x64, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x8b, 0x02, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, + 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, + 0x72, 0x74, 0x12, 0x36, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x70, 0x69, 0x6e, 0x12, 0x1a, 0x0a, 0x08, + 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x73, 0x65, 0x72, + 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x75, + 0x73, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, + 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, + 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x65, 0x66, + 0x69, 
0x78, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x5f, 0x70, 0x6f, 0x72, + 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x50, + 0x6f, 0x72, 0x74, 0x22, 0xa1, 0x01, 0x0a, 0x15, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, + 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x72, + 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x72, + 0x74, 0x5f, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x61, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x6f, 0x72, 0x74, 0x41, 0x75, 0x74, 0x6f, 0x41, + 0x73, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x22, 0x2c, 0x0a, 0x12, 0x52, 0x65, 0x6e, 0x65, 0x77, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, - 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x22, 0x14, 0x0a, 0x12, 0x53, 0x74, 0x6f, 0x70, 0x45, 0x78, 0x70, - 0x6f, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2a, 0x3a, 0x0a, 0x09, 0x4a, - 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x0e, 0x75, 0x6e, 0x6b, 0x6e, - 0x6f, 0x77, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, - 0x73, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x66, - 0x61, 0x69, 0x6c, 0x65, 0x64, 0x10, 0x02, 0x2a, 
0x4c, 0x0a, 0x0c, 0x52, 0x75, 0x6c, 0x65, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, - 0x57, 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, 0x01, 0x12, 0x07, 0x0a, - 0x03, 0x54, 0x43, 0x50, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x03, 0x12, - 0x08, 0x0a, 0x04, 0x49, 0x43, 0x4d, 0x50, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x55, 0x53, - 0x54, 0x4f, 0x4d, 0x10, 0x05, 0x2a, 0x20, 0x0a, 0x0d, 0x52, 0x75, 0x6c, 0x65, 0x44, 0x69, 0x72, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x06, 0x0a, 0x02, 0x49, 0x4e, 0x10, 0x00, 0x12, 0x07, - 0x0a, 0x03, 0x4f, 0x55, 0x54, 0x10, 0x01, 0x2a, 0x22, 0x0a, 0x0a, 0x52, 0x75, 0x6c, 0x65, 0x41, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x43, 0x45, 0x50, 0x54, 0x10, - 0x00, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x52, 0x4f, 0x50, 0x10, 0x01, 0x2a, 0x63, 0x0a, 0x0e, 0x45, - 0x78, 0x70, 0x6f, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x0f, 0x0a, - 0x0b, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x10, 0x00, 0x12, 0x10, - 0x0a, 0x0c, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x53, 0x10, 0x01, - 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x54, 0x43, 0x50, 0x10, 0x02, - 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x55, 0x44, 0x50, 0x10, 0x03, - 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x54, 0x4c, 0x53, 0x10, 0x04, - 0x32, 0xfd, 0x06, 0x0a, 0x11, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x45, 0x0a, 0x05, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, - 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, - 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 
0x72, 0x79, - 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x46, 0x0a, - 0x04, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x22, 0x15, 0x0a, 0x13, 0x52, 0x65, 0x6e, 0x65, 0x77, 0x45, 0x78, + 0x70, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0x0a, 0x11, + 0x53, 0x74, 0x6f, 0x70, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x22, 0x14, 0x0a, 0x12, 0x53, 0x74, 0x6f, + 0x70, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2a, + 0x3a, 0x0a, 0x09, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x0e, + 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x10, 0x00, + 0x12, 0x0d, 0x0a, 0x09, 0x73, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x10, 0x01, 0x12, + 0x0a, 0x0a, 0x06, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x10, 0x02, 0x2a, 0x4c, 0x0a, 0x0c, 0x52, + 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x0b, 0x0a, 0x07, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, + 0x01, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x44, + 0x50, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x43, 0x4d, 0x50, 0x10, 0x04, 0x12, 0x0a, 0x0a, + 0x06, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x10, 0x05, 0x2a, 0x20, 0x0a, 0x0d, 0x52, 0x75, 0x6c, + 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x06, 0x0a, 0x02, 0x49, 0x4e, + 0x10, 0x00, 0x12, 0x07, 
0x0a, 0x03, 0x4f, 0x55, 0x54, 0x10, 0x01, 0x2a, 0x22, 0x0a, 0x0a, 0x52, + 0x75, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x43, + 0x45, 0x50, 0x54, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x52, 0x4f, 0x50, 0x10, 0x01, 0x2a, + 0x63, 0x0a, 0x0e, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x12, 0x0f, 0x0a, 0x0b, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x48, 0x54, 0x54, 0x50, + 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x48, 0x54, 0x54, + 0x50, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x54, + 0x43, 0x50, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x55, + 0x44, 0x50, 0x10, 0x03, 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x54, + 0x4c, 0x53, 0x10, 0x04, 0x32, 0xfd, 0x06, 0x0a, 0x11, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x45, 0x0a, 0x05, 0x4c, 0x6f, + 0x67, 0x69, 0x6e, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x42, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x33, 0x0a, 0x09, 0x69, 0x73, 0x48, - 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x12, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 
0x79, 0x22, 0x00, 0x12, 0x5a, - 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, - 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x1c, 0x2e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, - 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, - 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x18, 0x47, 0x65, - 0x74, 0x50, 0x4b, 0x43, 0x45, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x08, 0x53, 0x79, 0x6e, 0x63, 0x4d, 0x65, 0x74, 0x61, - 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, - 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x11, - 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x6f, 0x75, 0x74, 0x12, 0x1c, 0x2e, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, - 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x11, 0x2e, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, - 0x12, 0x47, 0x0a, 0x03, 0x4a, 0x6f, 0x62, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x6d, 
0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x4c, 0x0a, 0x0c, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, + 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, + 0x00, 0x12, 0x46, 0x0a, 0x04, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0b, 0x52, 0x65, 0x6e, 0x65, 0x77, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x42, 0x0a, 0x0c, 0x47, 0x65, 0x74, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1d, 0x2e, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x33, 0x0a, + 0x09, 0x69, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x12, 0x11, 0x2e, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x11, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x1a, 0x47, 0x65, 
0x74, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x41, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, + 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x58, + 0x0a, 0x18, 0x47, 0x65, 0x74, 0x50, 0x4b, 0x43, 0x45, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, + 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x08, 0x53, 0x79, 0x6e, 0x63, + 0x4d, 0x65, 0x74, 0x61, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x1a, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x6f, 0x75, + 0x74, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, + 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, + 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x03, 0x4a, 0x6f, 0x62, 0x12, 0x1c, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 
0x70, 0x74, + 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x4c, 0x0a, + 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x12, 0x1c, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0b, 0x52, + 0x65, 0x6e, 0x65, 0x77, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, + 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x0a, 0x53, 0x74, 0x6f, 0x70, 0x45, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x0a, 0x53, 0x74, 0x6f, 0x70, 0x45, 0x78, 0x70, 0x6f, - 0x73, 0x65, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, - 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 
0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x6e, - 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, - 0x42, 0x08, 0x5a, 0x06, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x67, 0x65, 0x22, 0x00, 0x42, 0x08, 0x5a, 0x06, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/shared/management/proto/management.proto b/shared/management/proto/management.proto index 9acf7e2b3..70a530679 100644 --- a/shared/management/proto/management.proto +++ b/shared/management/proto/management.proto @@ -464,8 +464,8 @@ message PKCEAuthorizationFlow { message ProviderConfig { // An IDP application client id string ClientID = 1; - // An IDP application client secret - string ClientSecret = 2; + // Deprecated: use embedded IdP for providers that require a client secret (e.g. Google Workspace). + string ClientSecret = 2 [deprecated = true]; // An IDP API domain // Deprecated. Use a DeviceAuthEndpoint and TokenEndpoint string Domain = 3; diff --git a/shared/management/proto/proxy_service.pb.go b/shared/management/proto/proxy_service.pb.go index 93295e857..81637f69e 100644 --- a/shared/management/proto/proxy_service.pb.go +++ b/shared/management/proto/proxy_service.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.6 -// protoc v6.33.3 +// protoc-gen-go v1.26.0 +// protoc v7.34.1 // source: proxy_service.proto package proto @@ -13,7 +13,6 @@ import ( timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" sync "sync" - unsafe "unsafe" ) const ( @@ -178,21 +177,24 @@ func (ProxyStatus) EnumDescriptor() ([]byte, []int) { // ProxyCapabilities describes what a proxy can handle. 
type ProxyCapabilities struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Whether the proxy can bind arbitrary ports for TCP/UDP/TLS services. SupportsCustomPorts *bool `protobuf:"varint,1,opt,name=supports_custom_ports,json=supportsCustomPorts,proto3,oneof" json:"supports_custom_ports,omitempty"` // Whether the proxy requires a subdomain label in front of its cluster domain. - // When true, tenants cannot use the cluster domain bare. + // When true, accounts cannot use the cluster domain bare. RequireSubdomain *bool `protobuf:"varint,2,opt,name=require_subdomain,json=requireSubdomain,proto3,oneof" json:"require_subdomain,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache } func (x *ProxyCapabilities) Reset() { *x = ProxyCapabilities{} - mi := &file_proxy_service_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ProxyCapabilities) String() string { @@ -203,7 +205,7 @@ func (*ProxyCapabilities) ProtoMessage() {} func (x *ProxyCapabilities) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[0] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -234,21 +236,24 @@ func (x *ProxyCapabilities) GetRequireSubdomain() bool { // GetMappingUpdateRequest is sent to initialise a mapping stream. 
type GetMappingUpdateRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - ProxyId string `protobuf:"bytes,1,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"` - Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` - StartedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` - Address string `protobuf:"bytes,4,opt,name=address,proto3" json:"address,omitempty"` - Capabilities *ProxyCapabilities `protobuf:"bytes,5,opt,name=capabilities,proto3" json:"capabilities,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ProxyId string `protobuf:"bytes,1,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + StartedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` + Address string `protobuf:"bytes,4,opt,name=address,proto3" json:"address,omitempty"` + Capabilities *ProxyCapabilities `protobuf:"bytes,5,opt,name=capabilities,proto3" json:"capabilities,omitempty"` } func (x *GetMappingUpdateRequest) Reset() { *x = GetMappingUpdateRequest{} - mi := &file_proxy_service_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *GetMappingUpdateRequest) String() string { @@ -259,7 +264,7 @@ func (*GetMappingUpdateRequest) ProtoMessage() {} func (x *GetMappingUpdateRequest) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[1] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if 
ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -313,20 +318,23 @@ func (x *GetMappingUpdateRequest) GetCapabilities() *ProxyCapabilities { // No mappings may be sent to test the liveness of the Proxy. // Mappings that are sent should be interpreted by the Proxy appropriately. type GetMappingUpdateResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Mapping []*ProxyMapping `protobuf:"bytes,1,rep,name=mapping,proto3" json:"mapping,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Mapping []*ProxyMapping `protobuf:"bytes,1,rep,name=mapping,proto3" json:"mapping,omitempty"` // initial_sync_complete is set on the last message of the initial snapshot. // The proxy uses this to signal that startup is complete. InitialSyncComplete bool `protobuf:"varint,2,opt,name=initial_sync_complete,json=initialSyncComplete,proto3" json:"initial_sync_complete,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache } func (x *GetMappingUpdateResponse) Reset() { *x = GetMappingUpdateResponse{} - mi := &file_proxy_service_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *GetMappingUpdateResponse) String() string { @@ -337,7 +345,7 @@ func (*GetMappingUpdateResponse) ProtoMessage() {} func (x *GetMappingUpdateResponse) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[2] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -367,24 +375,27 @@ func (x *GetMappingUpdateResponse) GetInitialSyncComplete() bool { } type PathTargetOptions struct { - state protoimpl.MessageState `protogen:"open.v1"` - 
SkipTlsVerify bool `protobuf:"varint,1,opt,name=skip_tls_verify,json=skipTlsVerify,proto3" json:"skip_tls_verify,omitempty"` - RequestTimeout *durationpb.Duration `protobuf:"bytes,2,opt,name=request_timeout,json=requestTimeout,proto3" json:"request_timeout,omitempty"` - PathRewrite PathRewriteMode `protobuf:"varint,3,opt,name=path_rewrite,json=pathRewrite,proto3,enum=management.PathRewriteMode" json:"path_rewrite,omitempty"` - CustomHeaders map[string]string `protobuf:"bytes,4,rep,name=custom_headers,json=customHeaders,proto3" json:"custom_headers,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SkipTlsVerify bool `protobuf:"varint,1,opt,name=skip_tls_verify,json=skipTlsVerify,proto3" json:"skip_tls_verify,omitempty"` + RequestTimeout *durationpb.Duration `protobuf:"bytes,2,opt,name=request_timeout,json=requestTimeout,proto3" json:"request_timeout,omitempty"` + PathRewrite PathRewriteMode `protobuf:"varint,3,opt,name=path_rewrite,json=pathRewrite,proto3,enum=management.PathRewriteMode" json:"path_rewrite,omitempty"` + CustomHeaders map[string]string `protobuf:"bytes,4,rep,name=custom_headers,json=customHeaders,proto3" json:"custom_headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Send PROXY protocol v2 header to this backend. ProxyProtocol bool `protobuf:"varint,5,opt,name=proxy_protocol,json=proxyProtocol,proto3" json:"proxy_protocol,omitempty"` // Idle timeout before a UDP session is reaped. 
SessionIdleTimeout *durationpb.Duration `protobuf:"bytes,6,opt,name=session_idle_timeout,json=sessionIdleTimeout,proto3" json:"session_idle_timeout,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache } func (x *PathTargetOptions) Reset() { *x = PathTargetOptions{} - mi := &file_proxy_service_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *PathTargetOptions) String() string { @@ -395,7 +406,7 @@ func (*PathTargetOptions) ProtoMessage() {} func (x *PathTargetOptions) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[3] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -453,19 +464,22 @@ func (x *PathTargetOptions) GetSessionIdleTimeout() *durationpb.Duration { } type PathMapping struct { - state protoimpl.MessageState `protogen:"open.v1"` - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` - Target string `protobuf:"bytes,2,opt,name=target,proto3" json:"target,omitempty"` - Options *PathTargetOptions `protobuf:"bytes,3,opt,name=options,proto3" json:"options,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Target string `protobuf:"bytes,2,opt,name=target,proto3" json:"target,omitempty"` + Options *PathTargetOptions `protobuf:"bytes,3,opt,name=options,proto3" json:"options,omitempty"` } func (x *PathMapping) Reset() { *x = PathMapping{} - mi := &file_proxy_service_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - 
ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *PathMapping) String() string { @@ -476,7 +490,7 @@ func (*PathMapping) ProtoMessage() {} func (x *PathMapping) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[4] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -513,20 +527,23 @@ func (x *PathMapping) GetOptions() *PathTargetOptions { } type HeaderAuth struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Header name to check, e.g. "Authorization", "X-API-Key". Header string `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` // argon2id hash of the expected full header value. 
- HashedValue string `protobuf:"bytes,2,opt,name=hashed_value,json=hashedValue,proto3" json:"hashed_value,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + HashedValue string `protobuf:"bytes,2,opt,name=hashed_value,json=hashedValue,proto3" json:"hashed_value,omitempty"` } func (x *HeaderAuth) Reset() { *x = HeaderAuth{} - mi := &file_proxy_service_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *HeaderAuth) String() string { @@ -537,7 +554,7 @@ func (*HeaderAuth) ProtoMessage() {} func (x *HeaderAuth) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[5] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -567,22 +584,25 @@ func (x *HeaderAuth) GetHashedValue() string { } type Authentication struct { - state protoimpl.MessageState `protogen:"open.v1"` - SessionKey string `protobuf:"bytes,1,opt,name=session_key,json=sessionKey,proto3" json:"session_key,omitempty"` - MaxSessionAgeSeconds int64 `protobuf:"varint,2,opt,name=max_session_age_seconds,json=maxSessionAgeSeconds,proto3" json:"max_session_age_seconds,omitempty"` - Password bool `protobuf:"varint,3,opt,name=password,proto3" json:"password,omitempty"` - Pin bool `protobuf:"varint,4,opt,name=pin,proto3" json:"pin,omitempty"` - Oidc bool `protobuf:"varint,5,opt,name=oidc,proto3" json:"oidc,omitempty"` - HeaderAuths []*HeaderAuth `protobuf:"bytes,6,rep,name=header_auths,json=headerAuths,proto3" json:"header_auths,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + 
SessionKey string `protobuf:"bytes,1,opt,name=session_key,json=sessionKey,proto3" json:"session_key,omitempty"` + MaxSessionAgeSeconds int64 `protobuf:"varint,2,opt,name=max_session_age_seconds,json=maxSessionAgeSeconds,proto3" json:"max_session_age_seconds,omitempty"` + Password bool `protobuf:"varint,3,opt,name=password,proto3" json:"password,omitempty"` + Pin bool `protobuf:"varint,4,opt,name=pin,proto3" json:"pin,omitempty"` + Oidc bool `protobuf:"varint,5,opt,name=oidc,proto3" json:"oidc,omitempty"` + HeaderAuths []*HeaderAuth `protobuf:"bytes,6,rep,name=header_auths,json=headerAuths,proto3" json:"header_auths,omitempty"` } func (x *Authentication) Reset() { *x = Authentication{} - mi := &file_proxy_service_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *Authentication) String() string { @@ -593,7 +613,7 @@ func (*Authentication) ProtoMessage() {} func (x *Authentication) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[6] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -651,20 +671,23 @@ func (x *Authentication) GetHeaderAuths() []*HeaderAuth { } type AccessRestrictions struct { - state protoimpl.MessageState `protogen:"open.v1"` - AllowedCidrs []string `protobuf:"bytes,1,rep,name=allowed_cidrs,json=allowedCidrs,proto3" json:"allowed_cidrs,omitempty"` - BlockedCidrs []string `protobuf:"bytes,2,rep,name=blocked_cidrs,json=blockedCidrs,proto3" json:"blocked_cidrs,omitempty"` - AllowedCountries []string `protobuf:"bytes,3,rep,name=allowed_countries,json=allowedCountries,proto3" json:"allowed_countries,omitempty"` - BlockedCountries []string 
`protobuf:"bytes,4,rep,name=blocked_countries,json=blockedCountries,proto3" json:"blocked_countries,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AllowedCidrs []string `protobuf:"bytes,1,rep,name=allowed_cidrs,json=allowedCidrs,proto3" json:"allowed_cidrs,omitempty"` + BlockedCidrs []string `protobuf:"bytes,2,rep,name=blocked_cidrs,json=blockedCidrs,proto3" json:"blocked_cidrs,omitempty"` + AllowedCountries []string `protobuf:"bytes,3,rep,name=allowed_countries,json=allowedCountries,proto3" json:"allowed_countries,omitempty"` + BlockedCountries []string `protobuf:"bytes,4,rep,name=blocked_countries,json=blockedCountries,proto3" json:"blocked_countries,omitempty"` } func (x *AccessRestrictions) Reset() { *x = AccessRestrictions{} - mi := &file_proxy_service_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *AccessRestrictions) String() string { @@ -675,7 +698,7 @@ func (*AccessRestrictions) ProtoMessage() {} func (x *AccessRestrictions) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[7] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -719,7 +742,10 @@ func (x *AccessRestrictions) GetBlockedCountries() []string { } type ProxyMapping struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Type ProxyMappingUpdateType `protobuf:"varint,1,opt,name=type,proto3,enum=management.ProxyMappingUpdateType" json:"type,omitempty"` Id string 
`protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` AccountId string `protobuf:"bytes,3,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` @@ -738,15 +764,15 @@ type ProxyMapping struct { // For L4/TLS: the port the proxy listens on. ListenPort int32 `protobuf:"varint,11,opt,name=listen_port,json=listenPort,proto3" json:"listen_port,omitempty"` AccessRestrictions *AccessRestrictions `protobuf:"bytes,12,opt,name=access_restrictions,json=accessRestrictions,proto3" json:"access_restrictions,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache } func (x *ProxyMapping) Reset() { *x = ProxyMapping{} - mi := &file_proxy_service_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ProxyMapping) String() string { @@ -757,7 +783,7 @@ func (*ProxyMapping) ProtoMessage() {} func (x *ProxyMapping) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[8] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -858,17 +884,20 @@ func (x *ProxyMapping) GetAccessRestrictions() *AccessRestrictions { // SendAccessLogRequest consists of one or more AccessLogs from a Proxy. 
type SendAccessLogRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Log *AccessLog `protobuf:"bytes,1,opt,name=log,proto3" json:"log,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Log *AccessLog `protobuf:"bytes,1,opt,name=log,proto3" json:"log,omitempty"` } func (x *SendAccessLogRequest) Reset() { *x = SendAccessLogRequest{} - mi := &file_proxy_service_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *SendAccessLogRequest) String() string { @@ -879,7 +908,7 @@ func (*SendAccessLogRequest) ProtoMessage() {} func (x *SendAccessLogRequest) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[9] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -903,16 +932,18 @@ func (x *SendAccessLogRequest) GetLog() *AccessLog { // SendAccessLogResponse is intentionally empty to allow for future expansion. 
type SendAccessLogResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *SendAccessLogResponse) Reset() { *x = SendAccessLogResponse{} - mi := &file_proxy_service_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *SendAccessLogResponse) String() string { @@ -923,7 +954,7 @@ func (*SendAccessLogResponse) ProtoMessage() {} func (x *SendAccessLogResponse) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[10] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -939,7 +970,10 @@ func (*SendAccessLogResponse) Descriptor() ([]byte, []int) { } type AccessLog struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` LogId string `protobuf:"bytes,2,opt,name=log_id,json=logId,proto3" json:"log_id,omitempty"` AccountId string `protobuf:"bytes,3,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` @@ -956,15 +990,15 @@ type AccessLog struct { BytesUpload int64 `protobuf:"varint,14,opt,name=bytes_upload,json=bytesUpload,proto3" json:"bytes_upload,omitempty"` BytesDownload int64 `protobuf:"varint,15,opt,name=bytes_download,json=bytesDownload,proto3" json:"bytes_download,omitempty"` Protocol string `protobuf:"bytes,16,opt,name=protocol,proto3" json:"protocol,omitempty"` - unknownFields protoimpl.UnknownFields - 
sizeCache protoimpl.SizeCache } func (x *AccessLog) Reset() { *x = AccessLog{} - mi := &file_proxy_service_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *AccessLog) String() string { @@ -975,7 +1009,7 @@ func (*AccessLog) ProtoMessage() {} func (x *AccessLog) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[11] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1103,24 +1137,27 @@ func (x *AccessLog) GetProtocol() string { } type AuthenticateRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - AccountId string `protobuf:"bytes,2,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` - // Types that are valid to be assigned to Request: + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + AccountId string `protobuf:"bytes,2,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` + // Types that are assignable to Request: // // *AuthenticateRequest_Password // *AuthenticateRequest_Pin // *AuthenticateRequest_HeaderAuth - Request isAuthenticateRequest_Request `protobuf_oneof:"request"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Request isAuthenticateRequest_Request `protobuf_oneof:"request"` } func (x *AuthenticateRequest) Reset() { *x = AuthenticateRequest{} - mi := &file_proxy_service_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { 
+ mi := &file_proxy_service_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *AuthenticateRequest) String() string { @@ -1131,7 +1168,7 @@ func (*AuthenticateRequest) ProtoMessage() {} func (x *AuthenticateRequest) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[12] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1160,36 +1197,30 @@ func (x *AuthenticateRequest) GetAccountId() string { return "" } -func (x *AuthenticateRequest) GetRequest() isAuthenticateRequest_Request { - if x != nil { - return x.Request +func (m *AuthenticateRequest) GetRequest() isAuthenticateRequest_Request { + if m != nil { + return m.Request } return nil } func (x *AuthenticateRequest) GetPassword() *PasswordRequest { - if x != nil { - if x, ok := x.Request.(*AuthenticateRequest_Password); ok { - return x.Password - } + if x, ok := x.GetRequest().(*AuthenticateRequest_Password); ok { + return x.Password } return nil } func (x *AuthenticateRequest) GetPin() *PinRequest { - if x != nil { - if x, ok := x.Request.(*AuthenticateRequest_Pin); ok { - return x.Pin - } + if x, ok := x.GetRequest().(*AuthenticateRequest_Pin); ok { + return x.Pin } return nil } func (x *AuthenticateRequest) GetHeaderAuth() *HeaderAuthRequest { - if x != nil { - if x, ok := x.Request.(*AuthenticateRequest_HeaderAuth); ok { - return x.HeaderAuth - } + if x, ok := x.GetRequest().(*AuthenticateRequest_HeaderAuth); ok { + return x.HeaderAuth } return nil } @@ -1217,18 +1248,21 @@ func (*AuthenticateRequest_Pin) isAuthenticateRequest_Request() {} func (*AuthenticateRequest_HeaderAuth) isAuthenticateRequest_Request() {} type HeaderAuthRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - HeaderValue string `protobuf:"bytes,1,opt,name=header_value,json=headerValue,proto3" 
json:"header_value,omitempty"` - HeaderName string `protobuf:"bytes,2,opt,name=header_name,json=headerName,proto3" json:"header_name,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + HeaderValue string `protobuf:"bytes,1,opt,name=header_value,json=headerValue,proto3" json:"header_value,omitempty"` + HeaderName string `protobuf:"bytes,2,opt,name=header_name,json=headerName,proto3" json:"header_name,omitempty"` } func (x *HeaderAuthRequest) Reset() { *x = HeaderAuthRequest{} - mi := &file_proxy_service_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *HeaderAuthRequest) String() string { @@ -1239,7 +1273,7 @@ func (*HeaderAuthRequest) ProtoMessage() {} func (x *HeaderAuthRequest) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[13] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1269,17 +1303,20 @@ func (x *HeaderAuthRequest) GetHeaderName() string { } type PasswordRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Password string `protobuf:"bytes,1,opt,name=password,proto3" json:"password,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Password string `protobuf:"bytes,1,opt,name=password,proto3" json:"password,omitempty"` } func (x *PasswordRequest) Reset() { *x = PasswordRequest{} - mi := &file_proxy_service_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := 
&file_proxy_service_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *PasswordRequest) String() string { @@ -1290,7 +1327,7 @@ func (*PasswordRequest) ProtoMessage() {} func (x *PasswordRequest) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[14] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1313,17 +1350,20 @@ func (x *PasswordRequest) GetPassword() string { } type PinRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Pin string `protobuf:"bytes,1,opt,name=pin,proto3" json:"pin,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Pin string `protobuf:"bytes,1,opt,name=pin,proto3" json:"pin,omitempty"` } func (x *PinRequest) Reset() { *x = PinRequest{} - mi := &file_proxy_service_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *PinRequest) String() string { @@ -1334,7 +1374,7 @@ func (*PinRequest) ProtoMessage() {} func (x *PinRequest) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[15] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1357,18 +1397,21 @@ func (x *PinRequest) GetPin() string { } type AuthenticateResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` - SessionToken string 
`protobuf:"bytes,2,opt,name=session_token,json=sessionToken,proto3" json:"session_token,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` + SessionToken string `protobuf:"bytes,2,opt,name=session_token,json=sessionToken,proto3" json:"session_token,omitempty"` } func (x *AuthenticateResponse) Reset() { *x = AuthenticateResponse{} - mi := &file_proxy_service_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *AuthenticateResponse) String() string { @@ -1379,7 +1422,7 @@ func (*AuthenticateResponse) ProtoMessage() {} func (x *AuthenticateResponse) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[16] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1410,21 +1453,24 @@ func (x *AuthenticateResponse) GetSessionToken() string { // SendStatusUpdateRequest is sent by the proxy to update its status type SendStatusUpdateRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` - AccountId string `protobuf:"bytes,2,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` - Status ProxyStatus `protobuf:"varint,3,opt,name=status,proto3,enum=management.ProxyStatus" json:"status,omitempty"` - CertificateIssued bool `protobuf:"varint,4,opt,name=certificate_issued,json=certificateIssued,proto3" json:"certificate_issued,omitempty"` - ErrorMessage *string 
`protobuf:"bytes,5,opt,name=error_message,json=errorMessage,proto3,oneof" json:"error_message,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + AccountId string `protobuf:"bytes,2,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` + Status ProxyStatus `protobuf:"varint,3,opt,name=status,proto3,enum=management.ProxyStatus" json:"status,omitempty"` + CertificateIssued bool `protobuf:"varint,4,opt,name=certificate_issued,json=certificateIssued,proto3" json:"certificate_issued,omitempty"` + ErrorMessage *string `protobuf:"bytes,5,opt,name=error_message,json=errorMessage,proto3,oneof" json:"error_message,omitempty"` } func (x *SendStatusUpdateRequest) Reset() { *x = SendStatusUpdateRequest{} - mi := &file_proxy_service_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *SendStatusUpdateRequest) String() string { @@ -1435,7 +1481,7 @@ func (*SendStatusUpdateRequest) ProtoMessage() {} func (x *SendStatusUpdateRequest) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[17] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1487,16 +1533,18 @@ func (x *SendStatusUpdateRequest) GetErrorMessage() string { // SendStatusUpdateResponse is intentionally empty to allow for future expansion type SendStatusUpdateResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state 
protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *SendStatusUpdateResponse) Reset() { *x = SendStatusUpdateResponse{} - mi := &file_proxy_service_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *SendStatusUpdateResponse) String() string { @@ -1507,7 +1555,7 @@ func (*SendStatusUpdateResponse) ProtoMessage() {} func (x *SendStatusUpdateResponse) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[18] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1525,21 +1573,24 @@ func (*SendStatusUpdateResponse) Descriptor() ([]byte, []int) { // CreateProxyPeerRequest is sent by the proxy to create a peer connection // The token is a one-time authentication token sent via ProxyMapping type CreateProxyPeerRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` - AccountId string `protobuf:"bytes,2,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` - Token string `protobuf:"bytes,3,opt,name=token,proto3" json:"token,omitempty"` - WireguardPublicKey string `protobuf:"bytes,4,opt,name=wireguard_public_key,json=wireguardPublicKey,proto3" json:"wireguard_public_key,omitempty"` - Cluster string `protobuf:"bytes,5,opt,name=cluster,proto3" json:"cluster,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" 
json:"service_id,omitempty"` + AccountId string `protobuf:"bytes,2,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` + Token string `protobuf:"bytes,3,opt,name=token,proto3" json:"token,omitempty"` + WireguardPublicKey string `protobuf:"bytes,4,opt,name=wireguard_public_key,json=wireguardPublicKey,proto3" json:"wireguard_public_key,omitempty"` + Cluster string `protobuf:"bytes,5,opt,name=cluster,proto3" json:"cluster,omitempty"` } func (x *CreateProxyPeerRequest) Reset() { *x = CreateProxyPeerRequest{} - mi := &file_proxy_service_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *CreateProxyPeerRequest) String() string { @@ -1550,7 +1601,7 @@ func (*CreateProxyPeerRequest) ProtoMessage() {} func (x *CreateProxyPeerRequest) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[19] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1602,18 +1653,21 @@ func (x *CreateProxyPeerRequest) GetCluster() string { // CreateProxyPeerResponse contains the result of peer creation type CreateProxyPeerResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` - ErrorMessage *string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3,oneof" json:"error_message,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` + ErrorMessage *string 
`protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3,oneof" json:"error_message,omitempty"` } func (x *CreateProxyPeerResponse) Reset() { *x = CreateProxyPeerResponse{} - mi := &file_proxy_service_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *CreateProxyPeerResponse) String() string { @@ -1624,7 +1678,7 @@ func (*CreateProxyPeerResponse) ProtoMessage() {} func (x *CreateProxyPeerResponse) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[20] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1654,19 +1708,22 @@ func (x *CreateProxyPeerResponse) GetErrorMessage() string { } type GetOIDCURLRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - AccountId string `protobuf:"bytes,2,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` - RedirectUrl string `protobuf:"bytes,3,opt,name=redirect_url,json=redirectUrl,proto3" json:"redirect_url,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + AccountId string `protobuf:"bytes,2,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` + RedirectUrl string `protobuf:"bytes,3,opt,name=redirect_url,json=redirectUrl,proto3" json:"redirect_url,omitempty"` } func (x *GetOIDCURLRequest) Reset() { *x = GetOIDCURLRequest{} - mi := &file_proxy_service_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - 
ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *GetOIDCURLRequest) String() string { @@ -1677,7 +1734,7 @@ func (*GetOIDCURLRequest) ProtoMessage() {} func (x *GetOIDCURLRequest) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[21] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1714,17 +1771,20 @@ func (x *GetOIDCURLRequest) GetRedirectUrl() string { } type GetOIDCURLResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` } func (x *GetOIDCURLResponse) Reset() { *x = GetOIDCURLResponse{} - mi := &file_proxy_service_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *GetOIDCURLResponse) String() string { @@ -1735,7 +1795,7 @@ func (*GetOIDCURLResponse) ProtoMessage() {} func (x *GetOIDCURLResponse) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[22] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1758,18 +1818,21 @@ func (x *GetOIDCURLResponse) GetUrl() string { } type ValidateSessionRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Domain string 
`protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty"` - SessionToken string `protobuf:"bytes,2,opt,name=session_token,json=sessionToken,proto3" json:"session_token,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty"` + SessionToken string `protobuf:"bytes,2,opt,name=session_token,json=sessionToken,proto3" json:"session_token,omitempty"` } func (x *ValidateSessionRequest) Reset() { *x = ValidateSessionRequest{} - mi := &file_proxy_service_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ValidateSessionRequest) String() string { @@ -1780,7 +1843,7 @@ func (*ValidateSessionRequest) ProtoMessage() {} func (x *ValidateSessionRequest) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[23] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1810,20 +1873,23 @@ func (x *ValidateSessionRequest) GetSessionToken() string { } type ValidateSessionResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Valid bool `protobuf:"varint,1,opt,name=valid,proto3" json:"valid,omitempty"` - UserId string `protobuf:"bytes,2,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` - UserEmail string `protobuf:"bytes,3,opt,name=user_email,json=userEmail,proto3" json:"user_email,omitempty"` - DeniedReason string `protobuf:"bytes,4,opt,name=denied_reason,json=deniedReason,proto3" json:"denied_reason,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache 
protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Valid bool `protobuf:"varint,1,opt,name=valid,proto3" json:"valid,omitempty"` + UserId string `protobuf:"bytes,2,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` + UserEmail string `protobuf:"bytes,3,opt,name=user_email,json=userEmail,proto3" json:"user_email,omitempty"` + DeniedReason string `protobuf:"bytes,4,opt,name=denied_reason,json=deniedReason,proto3" json:"denied_reason,omitempty"` } func (x *ValidateSessionResponse) Reset() { *x = ValidateSessionResponse{} - mi := &file_proxy_service_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_proxy_service_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *ValidateSessionResponse) String() string { @@ -1834,7 +1900,7 @@ func (*ValidateSessionResponse) ProtoMessage() {} func (x *ValidateSessionResponse) ProtoReflect() protoreflect.Message { mi := &file_proxy_service_proto_msgTypes[24] - if x != nil { + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1879,195 +1945,356 @@ func (x *ValidateSessionResponse) GetDeniedReason() string { var File_proxy_service_proto protoreflect.FileDescriptor -const file_proxy_service_proto_rawDesc = "" + - "\n" + - "\x13proxy_service.proto\x12\n" + - "management\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xae\x01\n" + - "\x11ProxyCapabilities\x127\n" + - "\x15supports_custom_ports\x18\x01 \x01(\bH\x00R\x13supportsCustomPorts\x88\x01\x01\x120\n" + - "\x11require_subdomain\x18\x02 \x01(\bH\x01R\x10requireSubdomain\x88\x01\x01B\x18\n" + - "\x16_supports_custom_portsB\x14\n" + - "\x12_require_subdomain\"\xe6\x01\n" + - "\x17GetMappingUpdateRequest\x12\x19\n" + - "\bproxy_id\x18\x01 \x01(\tR\aproxyId\x12\x18\n" + - 
"\aversion\x18\x02 \x01(\tR\aversion\x129\n" + - "\n" + - "started_at\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\tstartedAt\x12\x18\n" + - "\aaddress\x18\x04 \x01(\tR\aaddress\x12A\n" + - "\fcapabilities\x18\x05 \x01(\v2\x1d.management.ProxyCapabilitiesR\fcapabilities\"\x82\x01\n" + - "\x18GetMappingUpdateResponse\x122\n" + - "\amapping\x18\x01 \x03(\v2\x18.management.ProxyMappingR\amapping\x122\n" + - "\x15initial_sync_complete\x18\x02 \x01(\bR\x13initialSyncComplete\"\xce\x03\n" + - "\x11PathTargetOptions\x12&\n" + - "\x0fskip_tls_verify\x18\x01 \x01(\bR\rskipTlsVerify\x12B\n" + - "\x0frequest_timeout\x18\x02 \x01(\v2\x19.google.protobuf.DurationR\x0erequestTimeout\x12>\n" + - "\fpath_rewrite\x18\x03 \x01(\x0e2\x1b.management.PathRewriteModeR\vpathRewrite\x12W\n" + - "\x0ecustom_headers\x18\x04 \x03(\v20.management.PathTargetOptions.CustomHeadersEntryR\rcustomHeaders\x12%\n" + - "\x0eproxy_protocol\x18\x05 \x01(\bR\rproxyProtocol\x12K\n" + - "\x14session_idle_timeout\x18\x06 \x01(\v2\x19.google.protobuf.DurationR\x12sessionIdleTimeout\x1a@\n" + - "\x12CustomHeadersEntry\x12\x10\n" + - "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + - "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"r\n" + - "\vPathMapping\x12\x12\n" + - "\x04path\x18\x01 \x01(\tR\x04path\x12\x16\n" + - "\x06target\x18\x02 \x01(\tR\x06target\x127\n" + - "\aoptions\x18\x03 \x01(\v2\x1d.management.PathTargetOptionsR\aoptions\"G\n" + - "\n" + - "HeaderAuth\x12\x16\n" + - "\x06header\x18\x01 \x01(\tR\x06header\x12!\n" + - "\fhashed_value\x18\x02 \x01(\tR\vhashedValue\"\xe5\x01\n" + - "\x0eAuthentication\x12\x1f\n" + - "\vsession_key\x18\x01 \x01(\tR\n" + - "sessionKey\x125\n" + - "\x17max_session_age_seconds\x18\x02 \x01(\x03R\x14maxSessionAgeSeconds\x12\x1a\n" + - "\bpassword\x18\x03 \x01(\bR\bpassword\x12\x10\n" + - "\x03pin\x18\x04 \x01(\bR\x03pin\x12\x12\n" + - "\x04oidc\x18\x05 \x01(\bR\x04oidc\x129\n" + - "\fheader_auths\x18\x06 \x03(\v2\x16.management.HeaderAuthR\vheaderAuths\"\xb8\x01\n" + - 
"\x12AccessRestrictions\x12#\n" + - "\rallowed_cidrs\x18\x01 \x03(\tR\fallowedCidrs\x12#\n" + - "\rblocked_cidrs\x18\x02 \x03(\tR\fblockedCidrs\x12+\n" + - "\x11allowed_countries\x18\x03 \x03(\tR\x10allowedCountries\x12+\n" + - "\x11blocked_countries\x18\x04 \x03(\tR\x10blockedCountries\"\xe6\x03\n" + - "\fProxyMapping\x126\n" + - "\x04type\x18\x01 \x01(\x0e2\".management.ProxyMappingUpdateTypeR\x04type\x12\x0e\n" + - "\x02id\x18\x02 \x01(\tR\x02id\x12\x1d\n" + - "\n" + - "account_id\x18\x03 \x01(\tR\taccountId\x12\x16\n" + - "\x06domain\x18\x04 \x01(\tR\x06domain\x12+\n" + - "\x04path\x18\x05 \x03(\v2\x17.management.PathMappingR\x04path\x12\x1d\n" + - "\n" + - "auth_token\x18\x06 \x01(\tR\tauthToken\x12.\n" + - "\x04auth\x18\a \x01(\v2\x1a.management.AuthenticationR\x04auth\x12(\n" + - "\x10pass_host_header\x18\b \x01(\bR\x0epassHostHeader\x12+\n" + - "\x11rewrite_redirects\x18\t \x01(\bR\x10rewriteRedirects\x12\x12\n" + - "\x04mode\x18\n" + - " \x01(\tR\x04mode\x12\x1f\n" + - "\vlisten_port\x18\v \x01(\x05R\n" + - "listenPort\x12O\n" + - "\x13access_restrictions\x18\f \x01(\v2\x1e.management.AccessRestrictionsR\x12accessRestrictions\"?\n" + - "\x14SendAccessLogRequest\x12'\n" + - "\x03log\x18\x01 \x01(\v2\x15.management.AccessLogR\x03log\"\x17\n" + - "\x15SendAccessLogResponse\"\x86\x04\n" + - "\tAccessLog\x128\n" + - "\ttimestamp\x18\x01 \x01(\v2\x1a.google.protobuf.TimestampR\ttimestamp\x12\x15\n" + - "\x06log_id\x18\x02 \x01(\tR\x05logId\x12\x1d\n" + - "\n" + - "account_id\x18\x03 \x01(\tR\taccountId\x12\x1d\n" + - "\n" + - "service_id\x18\x04 \x01(\tR\tserviceId\x12\x12\n" + - "\x04host\x18\x05 \x01(\tR\x04host\x12\x12\n" + - "\x04path\x18\x06 \x01(\tR\x04path\x12\x1f\n" + - "\vduration_ms\x18\a \x01(\x03R\n" + - "durationMs\x12\x16\n" + - "\x06method\x18\b \x01(\tR\x06method\x12#\n" + - "\rresponse_code\x18\t \x01(\x05R\fresponseCode\x12\x1b\n" + - "\tsource_ip\x18\n" + - " \x01(\tR\bsourceIp\x12%\n" + - "\x0eauth_mechanism\x18\v 
\x01(\tR\rauthMechanism\x12\x17\n" + - "\auser_id\x18\f \x01(\tR\x06userId\x12!\n" + - "\fauth_success\x18\r \x01(\bR\vauthSuccess\x12!\n" + - "\fbytes_upload\x18\x0e \x01(\x03R\vbytesUpload\x12%\n" + - "\x0ebytes_download\x18\x0f \x01(\x03R\rbytesDownload\x12\x1a\n" + - "\bprotocol\x18\x10 \x01(\tR\bprotocol\"\xf8\x01\n" + - "\x13AuthenticateRequest\x12\x0e\n" + - "\x02id\x18\x01 \x01(\tR\x02id\x12\x1d\n" + - "\n" + - "account_id\x18\x02 \x01(\tR\taccountId\x129\n" + - "\bpassword\x18\x03 \x01(\v2\x1b.management.PasswordRequestH\x00R\bpassword\x12*\n" + - "\x03pin\x18\x04 \x01(\v2\x16.management.PinRequestH\x00R\x03pin\x12@\n" + - "\vheader_auth\x18\x05 \x01(\v2\x1d.management.HeaderAuthRequestH\x00R\n" + - "headerAuthB\t\n" + - "\arequest\"W\n" + - "\x11HeaderAuthRequest\x12!\n" + - "\fheader_value\x18\x01 \x01(\tR\vheaderValue\x12\x1f\n" + - "\vheader_name\x18\x02 \x01(\tR\n" + - "headerName\"-\n" + - "\x0fPasswordRequest\x12\x1a\n" + - "\bpassword\x18\x01 \x01(\tR\bpassword\"\x1e\n" + - "\n" + - "PinRequest\x12\x10\n" + - "\x03pin\x18\x01 \x01(\tR\x03pin\"U\n" + - "\x14AuthenticateResponse\x12\x18\n" + - "\asuccess\x18\x01 \x01(\bR\asuccess\x12#\n" + - "\rsession_token\x18\x02 \x01(\tR\fsessionToken\"\xf3\x01\n" + - "\x17SendStatusUpdateRequest\x12\x1d\n" + - "\n" + - "service_id\x18\x01 \x01(\tR\tserviceId\x12\x1d\n" + - "\n" + - "account_id\x18\x02 \x01(\tR\taccountId\x12/\n" + - "\x06status\x18\x03 \x01(\x0e2\x17.management.ProxyStatusR\x06status\x12-\n" + - "\x12certificate_issued\x18\x04 \x01(\bR\x11certificateIssued\x12(\n" + - "\rerror_message\x18\x05 \x01(\tH\x00R\ferrorMessage\x88\x01\x01B\x10\n" + - "\x0e_error_message\"\x1a\n" + - "\x18SendStatusUpdateResponse\"\xb8\x01\n" + - "\x16CreateProxyPeerRequest\x12\x1d\n" + - "\n" + - "service_id\x18\x01 \x01(\tR\tserviceId\x12\x1d\n" + - "\n" + - "account_id\x18\x02 \x01(\tR\taccountId\x12\x14\n" + - "\x05token\x18\x03 \x01(\tR\x05token\x120\n" + - "\x14wireguard_public_key\x18\x04 
\x01(\tR\x12wireguardPublicKey\x12\x18\n" + - "\acluster\x18\x05 \x01(\tR\acluster\"o\n" + - "\x17CreateProxyPeerResponse\x12\x18\n" + - "\asuccess\x18\x01 \x01(\bR\asuccess\x12(\n" + - "\rerror_message\x18\x02 \x01(\tH\x00R\ferrorMessage\x88\x01\x01B\x10\n" + - "\x0e_error_message\"e\n" + - "\x11GetOIDCURLRequest\x12\x0e\n" + - "\x02id\x18\x01 \x01(\tR\x02id\x12\x1d\n" + - "\n" + - "account_id\x18\x02 \x01(\tR\taccountId\x12!\n" + - "\fredirect_url\x18\x03 \x01(\tR\vredirectUrl\"&\n" + - "\x12GetOIDCURLResponse\x12\x10\n" + - "\x03url\x18\x01 \x01(\tR\x03url\"U\n" + - "\x16ValidateSessionRequest\x12\x16\n" + - "\x06domain\x18\x01 \x01(\tR\x06domain\x12#\n" + - "\rsession_token\x18\x02 \x01(\tR\fsessionToken\"\x8c\x01\n" + - "\x17ValidateSessionResponse\x12\x14\n" + - "\x05valid\x18\x01 \x01(\bR\x05valid\x12\x17\n" + - "\auser_id\x18\x02 \x01(\tR\x06userId\x12\x1d\n" + - "\n" + - "user_email\x18\x03 \x01(\tR\tuserEmail\x12#\n" + - "\rdenied_reason\x18\x04 \x01(\tR\fdeniedReason*d\n" + - "\x16ProxyMappingUpdateType\x12\x17\n" + - "\x13UPDATE_TYPE_CREATED\x10\x00\x12\x18\n" + - "\x14UPDATE_TYPE_MODIFIED\x10\x01\x12\x17\n" + - "\x13UPDATE_TYPE_REMOVED\x10\x02*F\n" + - "\x0fPathRewriteMode\x12\x18\n" + - "\x14PATH_REWRITE_DEFAULT\x10\x00\x12\x19\n" + - "\x15PATH_REWRITE_PRESERVE\x10\x01*\xc8\x01\n" + - "\vProxyStatus\x12\x18\n" + - "\x14PROXY_STATUS_PENDING\x10\x00\x12\x17\n" + - "\x13PROXY_STATUS_ACTIVE\x10\x01\x12#\n" + - "\x1fPROXY_STATUS_TUNNEL_NOT_CREATED\x10\x02\x12$\n" + - " PROXY_STATUS_CERTIFICATE_PENDING\x10\x03\x12#\n" + - "\x1fPROXY_STATUS_CERTIFICATE_FAILED\x10\x04\x12\x16\n" + - "\x12PROXY_STATUS_ERROR\x10\x052\xfc\x04\n" + - "\fProxyService\x12_\n" + - "\x10GetMappingUpdate\x12#.management.GetMappingUpdateRequest\x1a$.management.GetMappingUpdateResponse0\x01\x12T\n" + - "\rSendAccessLog\x12 .management.SendAccessLogRequest\x1a!.management.SendAccessLogResponse\x12Q\n" + - "\fAuthenticate\x12\x1f.management.AuthenticateRequest\x1a 
.management.AuthenticateResponse\x12]\n" + - "\x10SendStatusUpdate\x12#.management.SendStatusUpdateRequest\x1a$.management.SendStatusUpdateResponse\x12Z\n" + - "\x0fCreateProxyPeer\x12\".management.CreateProxyPeerRequest\x1a#.management.CreateProxyPeerResponse\x12K\n" + - "\n" + - "GetOIDCURL\x12\x1d.management.GetOIDCURLRequest\x1a\x1e.management.GetOIDCURLResponse\x12Z\n" + - "\x0fValidateSession\x12\".management.ValidateSessionRequest\x1a#.management.ValidateSessionResponseB\bZ\x06/protob\x06proto3" +var file_proxy_service_proto_rawDesc = []byte{ + 0x0a, 0x13, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0xae, 0x01, 0x0a, 0x11, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x43, 0x61, 0x70, 0x61, + 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x75, 0x70, 0x70, + 0x6f, 0x72, 0x74, 0x73, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x70, 0x6f, 0x72, 0x74, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x13, 0x73, 0x75, 0x70, 0x70, 0x6f, + 0x72, 0x74, 0x73, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6f, 0x72, 0x74, 0x73, 0x88, 0x01, + 0x01, 0x12, 0x30, 0x0a, 0x11, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x62, + 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x10, + 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x53, 0x75, 0x62, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, + 0x88, 0x01, 0x01, 0x42, 0x18, 0x0a, 0x16, 0x5f, 0x73, 0x75, 0x70, 
0x70, 0x6f, 0x72, 0x74, 0x73, + 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x42, 0x14, 0x0a, + 0x12, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x62, 0x64, 0x6f, 0x6d, + 0x61, 0x69, 0x6e, 0x22, 0xe6, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, + 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x19, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, + 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, + 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x41, 0x0a, 0x0c, 0x63, 0x61, 0x70, + 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, + 0x78, 0x79, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x0c, + 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x22, 0x82, 0x01, 0x0a, + 0x18, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x07, 0x6d, 0x61, 0x70, + 0x70, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, + 0x61, 
0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x61, 0x70, + 0x70, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x6d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, + 0x15, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x63, 0x6f, + 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x69, 0x6e, + 0x69, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, + 0x65, 0x22, 0xce, 0x03, 0x0a, 0x11, 0x50, 0x61, 0x74, 0x68, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x6b, 0x69, 0x70, 0x5f, + 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x12, + 0x42, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x12, 0x3e, 0x0a, 0x0c, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x72, 0x65, 0x77, 0x72, + 0x69, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x77, 0x72, 0x69, + 0x74, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x0b, 0x70, 0x61, 0x74, 0x68, 0x52, 0x65, 0x77, 0x72, + 0x69, 0x74, 0x65, 0x12, 0x57, 0x0a, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x54, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 
0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x63, + 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x25, 0x0a, 0x0e, + 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x12, 0x4b, 0x0a, 0x14, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, + 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x73, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x1a, 0x40, 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0x72, 0x0a, 0x0b, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, + 0x67, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x37, 0x0a, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x68, + 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 
0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x47, 0x0a, 0x0a, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x41, 0x75, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, + 0x68, 0x61, 0x73, 0x68, 0x65, 0x64, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x68, 0x61, 0x73, 0x68, 0x65, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0xe5, 0x01, 0x0a, 0x0e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x4b, 0x65, 0x79, 0x12, 0x35, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x41, 0x67, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, + 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x70, 0x61, + 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x6e, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x03, 0x70, 0x69, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6f, 0x69, 0x64, 0x63, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x6f, 0x69, 0x64, 0x63, 0x12, 0x39, 0x0a, 0x0c, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x73, 0x18, 0x06, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x52, 0x0b, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x73, 0x22, 0xb8, 0x01, 0x0a, 0x12, 0x41, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x52, 
0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x23, + 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x63, 0x69, 0x64, 0x72, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x43, 0x69, + 0x64, 0x72, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x5f, 0x63, + 0x69, 0x64, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x62, 0x6c, 0x6f, 0x63, + 0x6b, 0x65, 0x64, 0x43, 0x69, 0x64, 0x72, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x6c, 0x6c, 0x6f, + 0x77, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x10, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, + 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, + 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x10, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x22, 0xe6, 0x03, 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x61, 0x70, 0x70, + 0x69, 0x6e, 0x67, 0x12, 0x36, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x22, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, + 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, + 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, + 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, + 0x69, 0x6e, 0x12, 0x2b, 0x0a, 0x04, 0x70, 0x61, 0x74, 
0x68, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, + 0x74, 0x68, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, + 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x2e, + 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, + 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x61, 0x75, 0x74, 0x68, 0x12, 0x28, + 0x0a, 0x10, 0x70, 0x61, 0x73, 0x73, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x61, 0x73, 0x73, 0x48, 0x6f, + 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x72, 0x65, 0x77, 0x72, + 0x69, 0x74, 0x65, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, + 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x4f, 0x0a, 0x13, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x73, 0x74, 0x72, + 0x69, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x12, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 
0x52, + 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x3f, 0x0a, 0x14, 0x53, + 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x03, 0x6c, 0x6f, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x03, 0x6c, 0x6f, 0x67, 0x22, 0x17, 0x0a, 0x15, + 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x86, 0x04, 0x0a, 0x09, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x4c, 0x6f, 0x67, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x15, 0x0a, + 0x06, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6c, + 0x6f, 0x67, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, + 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, + 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0a, 0x64, 0x75, 0x72, 0x61, 
0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, + 0x63, 0x6f, 0x64, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x49, 0x70, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6d, 0x65, + 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x61, + 0x75, 0x74, 0x68, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x12, 0x17, 0x0a, 0x07, + 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, + 0x73, 0x65, 0x72, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x75, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x61, 0x75, 0x74, + 0x68, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x5f, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x5f, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x0f, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x0d, 0x62, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, + 0x61, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x10, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0xf8, + 0x01, 0x0a, 0x13, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 
0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x39, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, + 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, + 0x12, 0x2a, 0x0a, 0x03, 0x70, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x69, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x03, 0x70, 0x69, 0x6e, 0x12, 0x40, 0x0a, 0x0b, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x48, 0x00, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x42, 0x09, + 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x57, 0x0a, 0x11, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, + 0x0a, 0x0c, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, + 0x6d, 0x65, 0x22, 0x2d, 0x0a, 0x0f, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, + 0x71, 
0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, + 0x64, 0x22, 0x1e, 0x0a, 0x0a, 0x50, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x10, 0x0a, 0x03, 0x70, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x70, 0x69, + 0x6e, 0x22, 0x55, 0x0a, 0x14, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xf3, 0x01, 0x0a, 0x17, 0x53, 0x65, 0x6e, + 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x49, 0x64, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, + 0x50, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x2d, 0x0a, 0x12, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x5f, 0x69, 0x73, 0x73, 0x75, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 
0x69, 0x63, 0x61, 0x74, 0x65, 0x49, 0x73, 0x73, 0x75, + 0x65, 0x64, 0x12, 0x28, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, 0x42, 0x10, 0x0a, 0x0e, + 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x1a, + 0x0a, 0x18, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xb8, 0x01, 0x0a, 0x16, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x30, 0x0a, 0x14, 0x77, 0x69, 0x72, + 0x65, 0x67, 0x75, 0x61, 0x72, 0x64, 0x5f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, + 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x77, 0x69, 0x72, 0x65, 0x67, 0x75, 0x61, + 0x72, 0x64, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x6f, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, + 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 
0x20, 0x01, 0x28, + 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x28, 0x0a, 0x0d, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x48, 0x00, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x88, 0x01, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x65, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, + 0x43, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, + 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, + 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x72, 0x6c, 0x22, 0x26, 0x0a, + 0x12, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x55, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x8c, 0x01, 0x0a, + 0x17, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x73, 
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x12, 0x17, + 0x0a, 0x07, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x75, 0x73, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x5f, + 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x75, 0x73, 0x65, + 0x72, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, + 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, + 0x65, 0x6e, 0x69, 0x65, 0x64, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x2a, 0x64, 0x0a, 0x16, 0x50, + 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x00, 0x12, 0x18, + 0x0a, 0x14, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x4f, + 0x44, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x55, 0x50, 0x44, 0x41, + 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x44, 0x10, + 0x02, 0x2a, 0x46, 0x0a, 0x0f, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, + 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x14, 0x50, 0x41, 0x54, 0x48, 0x5f, 0x52, 0x45, 0x57, + 0x52, 0x49, 0x54, 0x45, 0x5f, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x19, + 0x0a, 0x15, 0x50, 0x41, 0x54, 0x48, 0x5f, 0x52, 0x45, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x50, + 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x45, 0x10, 0x01, 0x2a, 0xc8, 0x01, 0x0a, 0x0b, 0x50, 0x72, + 0x6f, 0x78, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x14, 0x50, 0x52, 0x4f, + 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 
0x5f, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, + 0x47, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, + 0x54, 0x55, 0x53, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x23, 0x0a, 0x1f, + 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x54, 0x55, 0x4e, + 0x4e, 0x45, 0x4c, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, + 0x02, 0x12, 0x24, 0x0a, 0x20, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, + 0x53, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x45, + 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, 0x23, 0x0a, 0x1f, 0x50, 0x52, 0x4f, 0x58, 0x59, + 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, + 0x41, 0x54, 0x45, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, + 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x45, 0x52, 0x52, + 0x4f, 0x52, 0x10, 0x05, 0x32, 0xfc, 0x04, 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x5f, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, + 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x23, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, + 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4d, + 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x54, 0x0a, 0x0d, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x12, 0x20, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 
0x4c, + 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, + 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1f, 0x2e, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, + 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, + 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x5d, 0x0a, 0x10, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x12, 0x23, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a, + 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, + 0x72, 0x12, 0x22, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, + 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, + 0x74, 0x4f, 0x49, 0x44, 0x43, 
0x55, 0x52, 0x4c, 0x12, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a, 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x2e, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, + 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x56, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x42, 0x08, 0x5a, 0x06, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} var ( file_proxy_service_proto_rawDescOnce sync.Once - file_proxy_service_proto_rawDescData []byte + file_proxy_service_proto_rawDescData = file_proxy_service_proto_rawDesc ) func file_proxy_service_proto_rawDescGZIP() []byte { file_proxy_service_proto_rawDescOnce.Do(func() { - file_proxy_service_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_proxy_service_proto_rawDesc), len(file_proxy_service_proto_rawDesc))) + file_proxy_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_proxy_service_proto_rawDescData) }) return file_proxy_service_proto_rawDescData } var file_proxy_service_proto_enumTypes = make([]protoimpl.EnumInfo, 3) var file_proxy_service_proto_msgTypes = make([]protoimpl.MessageInfo, 26) -var file_proxy_service_proto_goTypes = []any{ +var file_proxy_service_proto_goTypes = []interface{}{ (ProxyMappingUpdateType)(0), // 0: management.ProxyMappingUpdateType 
(PathRewriteMode)(0), // 1: management.PathRewriteMode (ProxyStatus)(0), // 2: management.ProxyStatus @@ -2146,19 +2373,321 @@ func file_proxy_service_proto_init() { if File_proxy_service_proto != nil { return } - file_proxy_service_proto_msgTypes[0].OneofWrappers = []any{} - file_proxy_service_proto_msgTypes[12].OneofWrappers = []any{ + if !protoimpl.UnsafeEnabled { + file_proxy_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProxyCapabilities); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMappingUpdateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMappingUpdateResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PathTargetOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PathMapping); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeaderAuth); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[6].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*Authentication); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AccessRestrictions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProxyMapping); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SendAccessLogRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SendAccessLogResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AccessLog); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AuthenticateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeaderAuthRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + 
default: + return nil + } + } + file_proxy_service_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PasswordRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PinRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AuthenticateResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SendStatusUpdateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SendStatusUpdateResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateProxyPeerRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateProxyPeerResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*GetOIDCURLRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetOIDCURLResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateSessionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proxy_service_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateSessionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_proxy_service_proto_msgTypes[0].OneofWrappers = []interface{}{} + file_proxy_service_proto_msgTypes[12].OneofWrappers = []interface{}{ (*AuthenticateRequest_Password)(nil), (*AuthenticateRequest_Pin)(nil), (*AuthenticateRequest_HeaderAuth)(nil), } - file_proxy_service_proto_msgTypes[17].OneofWrappers = []any{} - file_proxy_service_proto_msgTypes[20].OneofWrappers = []any{} + file_proxy_service_proto_msgTypes[17].OneofWrappers = []interface{}{} + file_proxy_service_proto_msgTypes[20].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_proxy_service_proto_rawDesc), len(file_proxy_service_proto_rawDesc)), + RawDescriptor: file_proxy_service_proto_rawDesc, NumEnums: 3, NumMessages: 26, NumExtensions: 0, @@ -2170,6 +2699,7 @@ func file_proxy_service_proto_init() { MessageInfos: file_proxy_service_proto_msgTypes, }.Build() File_proxy_service_proto = out.File + 
file_proxy_service_proto_rawDesc = nil file_proxy_service_proto_goTypes = nil file_proxy_service_proto_depIdxs = nil } diff --git a/shared/relay/client/client.go b/shared/relay/client/client.go index ed1b63435..b10b05617 100644 --- a/shared/relay/client/client.go +++ b/shared/relay/client/client.go @@ -333,7 +333,7 @@ func (c *Client) connect(ctx context.Context) (*RelayAddr, error) { dialers := c.getDialers() rd := dialer.NewRaceDial(c.log, dialer.DefaultConnectionTimeout, c.connectionURL, dialers...) - conn, err := rd.Dial() + conn, err := rd.Dial(ctx) if err != nil { return nil, err } diff --git a/shared/relay/client/dialer/quic/quic.go b/shared/relay/client/dialer/quic/quic.go index 78462837d..2d7b00a80 100644 --- a/shared/relay/client/dialer/quic/quic.go +++ b/shared/relay/client/dialer/quic/quic.go @@ -89,12 +89,12 @@ func prepareURL(address string) (string, error) { finalHost, finalPort, err := net.SplitHostPort(host) if err != nil { if strings.Contains(err.Error(), "missing port") { - return host + ":" + defaultPort, nil + return net.JoinHostPort(strings.Trim(host, "[]"), defaultPort), nil } // return any other split error as is return "", err } - return finalHost + ":" + finalPort, nil + return net.JoinHostPort(finalHost, finalPort), nil } diff --git a/shared/relay/client/dialer/race_dialer.go b/shared/relay/client/dialer/race_dialer.go index 0550fc63e..34359d17e 100644 --- a/shared/relay/client/dialer/race_dialer.go +++ b/shared/relay/client/dialer/race_dialer.go @@ -40,10 +40,10 @@ func NewRaceDial(log *log.Entry, connectionTimeout time.Duration, serverURL stri } } -func (r *RaceDial) Dial() (net.Conn, error) { +func (r *RaceDial) Dial(ctx context.Context) (net.Conn, error) { connChan := make(chan dialResult, len(r.dialerFns)) winnerConn := make(chan net.Conn, 1) - abortCtx, abort := context.WithCancel(context.Background()) + abortCtx, abort := context.WithCancel(ctx) defer abort() for _, dfn := range r.dialerFns { diff --git 
a/shared/relay/client/dialer/race_dialer_test.go b/shared/relay/client/dialer/race_dialer_test.go index d216ec5e7..aa18df578 100644 --- a/shared/relay/client/dialer/race_dialer_test.go +++ b/shared/relay/client/dialer/race_dialer_test.go @@ -78,7 +78,7 @@ func TestRaceDialEmptyDialers(t *testing.T) { serverURL := "test.server.com" rd := NewRaceDial(logger, DefaultConnectionTimeout, serverURL) - conn, err := rd.Dial() + conn, err := rd.Dial(context.Background()) if err == nil { t.Errorf("Expected an error with empty dialers, got nil") } @@ -104,7 +104,7 @@ func TestRaceDialSingleSuccessfulDialer(t *testing.T) { } rd := NewRaceDial(logger, DefaultConnectionTimeout, serverURL, mockDialer) - conn, err := rd.Dial() + conn, err := rd.Dial(context.Background()) if err != nil { t.Errorf("Expected no error, got %v", err) } @@ -137,7 +137,7 @@ func TestRaceDialMultipleDialersWithOneSuccess(t *testing.T) { } rd := NewRaceDial(logger, DefaultConnectionTimeout, serverURL, mockDialer1, mockDialer2) - conn, err := rd.Dial() + conn, err := rd.Dial(context.Background()) if err != nil { t.Errorf("Expected no error, got %v", err) } @@ -160,7 +160,7 @@ func TestRaceDialTimeout(t *testing.T) { } rd := NewRaceDial(logger, 3*time.Second, serverURL, mockDialer) - conn, err := rd.Dial() + conn, err := rd.Dial(context.Background()) if err == nil { t.Errorf("Expected an error, got nil") } @@ -188,7 +188,7 @@ func TestRaceDialAllDialersFail(t *testing.T) { } rd := NewRaceDial(logger, DefaultConnectionTimeout, serverURL, mockDialer1, mockDialer2) - conn, err := rd.Dial() + conn, err := rd.Dial(context.Background()) if err == nil { t.Errorf("Expected an error, got nil") } @@ -230,7 +230,7 @@ func TestRaceDialFirstSuccessfulDialerWins(t *testing.T) { } rd := NewRaceDial(logger, DefaultConnectionTimeout, serverURL, mockDialer1, mockDialer2) - conn, err := rd.Dial() + conn, err := rd.Dial(context.Background()) if err != nil { t.Errorf("Expected no error, got %v", err) } diff --git 
a/tools/idp-migrate/DEVELOPMENT.md b/tools/idp-migrate/DEVELOPMENT.md new file mode 100644 index 000000000..5697ead40 --- /dev/null +++ b/tools/idp-migrate/DEVELOPMENT.md @@ -0,0 +1,209 @@ +# IdP Migration Tool — Developer Guide + +## Overview + +This tool migrates NetBird deployments from an external IdP (Auth0, Zitadel, Okta, etc.) to the embedded Dex IdP introduced in v0.62.0. It does two things: + +1. **DB migration** — Re-encodes every user ID from `{original_id}` to Dex's protobuf-encoded format `base64(proto{original_id, connector_id})`. +2. **Config generation** — Transforms `management.json`: removes `IdpManagerConfig`, `PKCEAuthorizationFlow`, and `DeviceAuthorizationFlow`; strips `HttpConfig` to only `CertFile`/`CertKey`; adds `EmbeddedIdP` with the static connector configuration. + +## Code Layout + +``` +tools/idp-migrate/ +├── config.go # migrationConfig struct, CLI flags, env vars, validation +├── main.go # CLI entry point, migration phases, config generation +├── main_test.go # 8 test functions (18 subtests) covering config, connector, URL builder, config generation +└── DEVELOPMENT.md # this file + +management/server/idp/migration/ +├── migration.go # Server interface, MigrateUsersToStaticConnectors(), PopulateUserInfo(), migrateUser(), reconcileActivityStore() +├── migration_test.go # 6 top-level tests (with subtests) using hand-written mocks +└── store.go # Store, EventStore interfaces, SchemaCheck, RequiredSchema, SchemaError types + +management/server/store/ +└── sql_store_idp_migration.go # CheckSchema(), ListUsers(), UpdateUserInfo(), UpdateUserID(), txDeferFKConstraints() on SqlStore + +management/server/activity/store/ +├── sql_store_idp_migration.go # UpdateUserID() on activity Store +└── sql_store_idp_migration_test.go # 5 subtests for activity UpdateUserID + +``` + +## Release / Distribution + +The tool is included in `.goreleaser.yaml` as the `netbird-idp-migrate` build target. 
Each NetBird release produces pre-built archives for Linux (amd64, arm64, arm) that are uploaded to GitHub Releases. The archive naming convention is: + +``` +netbird-idp-migrate__linux_.tar.gz +``` + +The build requires `CGO_ENABLED=1` because it links the SQLite driver used by `SqlStore`. The cross-compilation setup (CC env for arm64/arm) mirrors the `netbird-mgmt` build. + +## CLI Flags + +| Flag | Type | Default | Description | +|------|------|---------|-------------| +| `--config` | string | *(required)* | Path to management.json | +| `--datadir` | string | *(required)* | Data directory (containing store.db / events.db) | +| `--idp-seed-info` | string | *(required)* | Base64-encoded connector JSON | +| `--domain` | string | `""` | Sets both dashboard and API domain (convenience shorthand) | +| `--dashboard-domain` | string | *(required)* | Dashboard domain (for redirect URIs) | +| `--api-domain` | string | *(required)* | API domain (for Dex issuer and callback URLs) | +| `--dry-run` | bool | `false` | Preview changes without writing | +| `--force` | bool | `false` | Skip interactive confirmation prompt | +| `--skip-config` | bool | `false` | Skip config generation (DB-only migration) | +| `--skip-populate-user-info` | bool | `false` | Skip populating user info (user ID migration only) | +| `--log-level` | string | `"info"` | Log level (debug, info, warn, error) | + +## Environment Variables + +All flags can be overridden via environment variables. Env vars take precedence over flags. 
+ +| Env Var | Overrides | +|---------|-----------| +| `NETBIRD_DOMAIN` | Sets both `--dashboard-domain` and `--api-domain` | +| `NETBIRD_API_URL` | `--api-domain` | +| `NETBIRD_DASHBOARD_URL` | `--dashboard-domain` | +| `NETBIRD_CONFIG_PATH` | `--config` | +| `NETBIRD_DATA_DIR` | `--datadir` | +| `NETBIRD_IDP_SEED_INFO` | `--idp-seed-info` | +| `NETBIRD_DRY_RUN` | `--dry-run` (set to `"true"`) | +| `NETBIRD_FORCE` | `--force` (set to `"true"`) | +| `NETBIRD_SKIP_CONFIG` | `--skip-config` (set to `"true"`) | +| `NETBIRD_SKIP_POPULATE_USER_INFO` | `--skip-populate-user-info` (set to `"true"`) | +| `NETBIRD_LOG_LEVEL` | `--log-level` | + +Resolution order: CLI flags are parsed first, then `--domain` sets both URLs, then `NETBIRD_DOMAIN` overrides both, then `NETBIRD_API_URL` / `NETBIRD_DASHBOARD_URL` override individually. After all resolution, `validateConfig()` ensures all required fields are set. + +## Migration Flow + +### Phase 0: Schema Validation + +`validateSchema()` opens the store and calls `CheckSchema(RequiredSchema)` to verify that all tables and columns required by the migration exist in the database. If anything is missing, the tool exits with a descriptive error instructing the operator to start the management server (v0.66.4+) at least once so that automatic GORM migrations create the required schema. + +### Phase 1: Populate User Info + +Unless `--skip-populate-user-info` is set, `populateUserInfoFromIDP()` runs before connector resolution: + +1. Creates an IDP manager from the existing `IdpManagerConfig` in management.json. +2. Calls `idpManager.GetAllAccounts()` to fetch email and name for all users from the external IDP. +3. Calls `migration.PopulateUserInfo()` which iterates over all store users, skipping service users and users that already have both email and name populated. For Dex-encoded user IDs, it decodes back to the original IDP ID for lookup. +4. Updates the store with any missing email/name values. 
+ +This ensures user contact info is preserved before the ID migration makes the original IDP IDs inaccessible. + +### Phase 2: Connector Decoding + +`decodeConnectorConfig()` base64-decodes and JSON-unmarshals the connector JSON provided via `--idp-seed-info` (or `NETBIRD_IDP_SEED_INFO`). It validates that the connector ID is non-empty. There is no auto-detection or fallback — the operator must provide the full connector configuration. + +### Phase 3: DB Migration + +`migrateDB()` orchestrates the database migration: + +1. `openStores()` opens the main store (`SqlStore`) and activity store (non-fatal if missing). +2. Type-asserts both to `migration.Store` / `migration.EventStore`. +3. `previewUsers()` scans all users — counts pending vs already-migrated (using `DecodeDexUserID`). +4. `confirmPrompt()` asks for interactive confirmation (unless `--force` or `--dry-run`). +5. Calls `migration.MigrateUsersToStaticConnectors(srv, conn)`: + - **Reconciliation pass**: fixes activity store references for users already migrated in the main DB but whose events still reference old IDs (from a previous partial failure). + - **Main loop**: for each non-migrated user, calls `migrateUser()` which atomically updates the user ID in both the main store and activity store. + - **Dry-run**: logs what would happen, skips all writes. + +`SqlStore.UpdateUserID()` atomically updates the user's primary key and all foreign key references (peers, PATs, groups, policies, jobs, etc.) in a single transaction. + +### Phase 4: Config Generation + +Unless `--skip-config` is set, `generateConfig()` runs: + +1. **Read** — loads existing `management.json` as raw JSON to preserve unknown fields. + +2. **Strip** — removes keys that are no longer needed: + - `IdpManagerConfig` + - `PKCEAuthorizationFlow` + - `DeviceAuthorizationFlow` + - All `HttpConfig` fields except `CertFile` and `CertKey` + +3. 
**Add EmbeddedIdP** — inserts a minimal section with: + - `Enabled: true` + - `Issuer` built from `--api-domain` + `/oauth2` + - `DashboardRedirectURIs` built from `--dashboard-domain` + `/nb-auth` and `/nb-silent-auth` + - `StaticConnectors` containing the decoded connector, with `redirectURI` overridden to `--api-domain` + `/oauth2/callback` + +4. **Write** — backs up original as `management.json.bak`, writes new config. In dry-run mode, prints to stdout instead. + +## Interface Decoupling + +Migration methods (`ListUsers`, `UpdateUserID`) are **not** on the core `store.Store` or `activity.Store` interfaces. Instead, they're defined in `migration/store.go`: + +```go +type Store interface { + ListUsers(ctx context.Context) ([]*types.User, error) + UpdateUserID(ctx context.Context, accountID, oldUserID, newUserID string) error + UpdateUserInfo(ctx context.Context, userID, email, name string) error + CheckSchema(checks []SchemaCheck) []SchemaError +} + +type EventStore interface { + UpdateUserID(ctx context.Context, oldUserID, newUserID string) error +} +``` + +A `Server` interface wraps both stores for dependency injection: + +```go +type Server interface { + Store() Store + EventStore() EventStore // may return nil +} +``` + +The concrete `SqlStore` types already have these methods (in their respective `sql_store_idp_migration.go` files), so they satisfy the interfaces via Go's structural typing — zero changes needed on the core store interfaces. At runtime, the standalone tool type-asserts: + +```go +migStore, ok := mainStore.(migration.Store) +``` + +This keeps migration concerns completely separate from the core store contract. + +## Dex User ID Encoding + +`EncodeDexUserID(userID, connectorID)` produces a manually-encoded protobuf with two string fields, then base64-encodes the result (raw, no padding). `DecodeDexUserID` reverses this. The migration loop uses `DecodeDexUserID` to detect already-migrated users (decode succeeds → skip). 
+ +See `idp/dex/provider.go` for the implementation. + +## Standalone Tool + +The standalone tool (`tools/idp-migrate/main.go`) is the primary migration entry point. It opens stores directly, runs schema validation, populates user info from the external IDP, migrates user IDs, and generates the new config — then exits. Configuration is handled entirely through `config.go` which parses CLI flags and environment variables. + +## Running Tests + +```bash +# Migration library +go test -v ./management/server/idp/migration/... + +# Standalone tool +go test -v ./tools/idp-migrate/... + +# Activity store migration tests +go test -v -run TestUpdateUserID ./management/server/activity/store/... + +# Build locally +go build ./tools/idp-migrate/ +``` + +## Clean Removal + +When migration tooling is no longer needed, delete: + +1. `tools/idp-migrate/` — entire directory +2. `management/server/idp/migration/` — entire directory +3. `management/server/store/sql_store_idp_migration.go` — migration methods on main SqlStore +4. `management/server/activity/store/sql_store_idp_migration.go` — migration method on activity Store +5. `management/server/activity/store/sql_store_idp_migration_test.go` — tests for the above +6. In `.goreleaser.yaml`: + - Remove the `netbird-idp-migrate` build entry + - Remove the `netbird-idp-migrate` archive entry +7. Run `go mod tidy` + +No core interfaces or mocks need editing — that's the point of the decoupling. diff --git a/tools/idp-migrate/LICENSE b/tools/idp-migrate/LICENSE new file mode 100644 index 000000000..be3f7b28e --- /dev/null +++ b/tools/idp-migrate/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. 
+ + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. 
It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. diff --git a/tools/idp-migrate/config.go b/tools/idp-migrate/config.go new file mode 100644 index 000000000..f4d6b9ea2 --- /dev/null +++ b/tools/idp-migrate/config.go @@ -0,0 +1,174 @@ +package main + +import ( + "flag" + "fmt" + "os" + "strconv" + + "github.com/netbirdio/netbird/util" +) + +type migrationConfig struct { + // Data + dashboardURL string + apiURL string + configPath string + dataDir string + idpSeedInfo string + + // Options + dryRun bool + force bool + skipConfig bool + skipPopulateUserInfo bool + + // Logging + logLevel string +} + +func config() (*migrationConfig, error) { + cfg, err := configFromArgs(os.Args[1:]) + if err != nil { + return nil, err + } + + if err := util.InitLog(cfg.logLevel, util.LogConsole); err != nil { + return nil, fmt.Errorf("init logger: %w", err) + } + + return cfg, nil +} + +func configFromArgs(args []string) (*migrationConfig, error) { + var cfg migrationConfig + var domain string + + fs := flag.NewFlagSet("netbird-idp-migrate", flag.ContinueOnError) + fs.StringVar(&domain, "domain", "", "domain for both dashboard and API") + fs.StringVar(&cfg.dashboardURL, "dashboard-url", "", "dashboard URL") + fs.StringVar(&cfg.apiURL, "api-url", "", "API URL") + fs.StringVar(&cfg.configPath, "config", "", "path to management.json (required)") + fs.StringVar(&cfg.dataDir, "datadir", "", "override data directory from config") + fs.StringVar(&cfg.idpSeedInfo, "idp-seed-info", "", "base64-encoded connector JSON (overrides auto-detection)") + fs.BoolVar(&cfg.dryRun, "dry-run", false, "preview changes without writing") + fs.BoolVar(&cfg.force, "force", false, 
"skip confirmation prompt")
+	fs.BoolVar(&cfg.skipConfig, "skip-config", false, "skip config generation (DB migration only)")
+	fs.BoolVar(&cfg.skipPopulateUserInfo, "skip-populate-user-info", false, "skip populating user info (user id migration only)")
+	fs.StringVar(&cfg.logLevel, "log-level", "info", "log level (debug, info, warn, error)")
+
+	if err := fs.Parse(args); err != nil {
+		return nil, err
+	}
+
+	applyOverrides(&cfg, domain)
+
+	if err := validateConfig(&cfg); err != nil {
+		return nil, err
+	}
+
+	return &cfg, nil
+}
+
+// applyOverrides resolves domain configuration from broad to narrow sources.
+// The most granular value always wins:
+//
+//	--domain flag (broadest, only fills blanks)
+//	NETBIRD_DOMAIN env (overrides flags, sets both)
+//	--api-url / --dashboard-url flags (more specific than --domain)
+//	NETBIRD_API_URL / NETBIRD_DASHBOARD_URL env (most specific, always wins)
+//
+// Other env vars unconditionally override their corresponding flags.
+func applyOverrides(cfg *migrationConfig, domain string) {
+	// --domain is a convenience shorthand: only fills in values not already
+	// set by the more specific --api-url / --dashboard-url flags.
+	if domain != "" {
+		if cfg.apiURL == "" {
+			cfg.apiURL = domain
+		}
+		if cfg.dashboardURL == "" {
+			cfg.dashboardURL = domain
+		}
+	}
+
+	// Env vars override flags. Broad env var first, then narrow ones on top,
+	// so the most granular value always wins.
+	if val, ok := os.LookupEnv("NETBIRD_DOMAIN"); ok {
+		cfg.dashboardURL = val
+		cfg.apiURL = val
+	}
+
+	if val, ok := os.LookupEnv("NETBIRD_API_URL"); ok {
+		cfg.apiURL = val
+	}
+
+	if val, ok := os.LookupEnv("NETBIRD_DASHBOARD_URL"); ok {
+		cfg.dashboardURL = val
+	}
+
+	if val, ok := os.LookupEnv("NETBIRD_CONFIG_PATH"); ok {
+		cfg.configPath = val
+	}
+
+	if val, ok := os.LookupEnv("NETBIRD_DATA_DIR"); ok {
+		cfg.dataDir = val
+	}
+
+	if val, ok := os.LookupEnv("NETBIRD_IDP_SEED_INFO"); ok {
+		cfg.idpSeedInfo = val
+	}
+
+	// NOTE(review): redundant — NETBIRD_DRY_RUN is parsed identically by parseBool below
+	if sval, ok := os.LookupEnv("NETBIRD_DRY_RUN"); ok {
+		if val, err := strconv.ParseBool(sval); err == nil {
+			cfg.dryRun = val
+		}
+	}
+
+	cfg.dryRun = parseBool("NETBIRD_DRY_RUN", cfg.dryRun)
+	cfg.force = parseBool("NETBIRD_FORCE", cfg.force)
+	cfg.skipConfig = parseBool("NETBIRD_SKIP_CONFIG", cfg.skipConfig)
+	cfg.skipPopulateUserInfo = parseBool("NETBIRD_SKIP_POPULATE_USER_INFO", cfg.skipPopulateUserInfo)
+
+	if val, ok := os.LookupEnv("NETBIRD_LOG_LEVEL"); ok {
+		cfg.logLevel = val
+	}
+}
+
+func parseBool(varName string, defaultVal bool) bool {
+	stringValue, ok := os.LookupEnv(varName)
+	if !ok {
+		return defaultVal
+	}
+
+	boolValue, err := strconv.ParseBool(stringValue)
+	if err != nil {
+		return defaultVal
+	}
+
+	return boolValue
+}
+
+func validateConfig(cfg *migrationConfig) error {
+	if cfg.configPath == "" {
+		return fmt.Errorf("--config is required")
+	}
+
+	if cfg.dataDir == "" {
+		return fmt.Errorf("--datadir is required")
+	}
+
+	if cfg.idpSeedInfo == "" {
+		return fmt.Errorf("--idp-seed-info is required")
+	}
+
+	if cfg.apiURL == "" {
+		return fmt.Errorf("--api-domain is required")
+	}
+
+	if cfg.dashboardURL == "" {
+		return fmt.Errorf("--dashboard-domain is required")
+	}
+
+	return nil
+}
diff --git a/tools/idp-migrate/main.go b/tools/idp-migrate/main.go
new file mode 100644
index 000000000..a8cba0750
--- /dev/null
+++ b/tools/idp-migrate/main.go
@@ -0,0 +1,449 @@
+// 
Package main provides a standalone CLI tool to migrate user IDs from an +// external IdP format to the embedded Dex IdP format used by NetBird >= v0.62.0. +// +// This tool reads management.json to auto-detect the current external IdP +// configuration (issuer, clientID, clientSecret, type) and re-encodes all user +// IDs in the database to the Dex protobuf-encoded format. It works independently +// of migrate.sh and the combined server, allowing operators to migrate their +// database before switching to the combined server. +// +// Usage: +// +// netbird-idp-migrate --config /etc/netbird/management.json [--dry-run] [--force] +package main + +import ( + "bufio" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "maps" + "net/url" + "os" + "strings" + + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/idp/dex" + nbconfig "github.com/netbirdio/netbird/management/internals/server/config" + activitystore "github.com/netbirdio/netbird/management/server/activity/store" + "github.com/netbirdio/netbird/management/server/idp" + "github.com/netbirdio/netbird/management/server/idp/migration" + "github.com/netbirdio/netbird/management/server/store" + "github.com/netbirdio/netbird/management/server/types" + "github.com/netbirdio/netbird/util" + "github.com/netbirdio/netbird/util/crypt" +) + +// migrationServer implements migration.Server by wrapping the migration-specific interfaces. 
+type migrationServer struct { + store migration.Store + eventStore migration.EventStore +} + +func (s *migrationServer) Store() migration.Store { return s.store } +func (s *migrationServer) EventStore() migration.EventStore { return s.eventStore } + +func main() { + cfg, err := config() + if err != nil { + log.Fatalf("config error: %v", err) + } + + if err := run(cfg); err != nil { + log.Fatalf("migration failed: %v", err) + } + + if !cfg.dryRun { + printPostMigrationInstructions(cfg) + } +} + +func run(cfg *migrationConfig) error { + mgmtConfig := &nbconfig.Config{} + if _, err := util.ReadJsonWithEnvSub(cfg.configPath, mgmtConfig); err != nil { + return err + } + + // Validate the database schema before attempting any operations. + if err := validateSchema(mgmtConfig, cfg.dataDir); err != nil { + return err + } + + if !cfg.skipPopulateUserInfo { + err := populateUserInfoFromIDP(cfg, mgmtConfig) + if err != nil { + return fmt.Errorf("populate user info: %w", err) + } + } + + connectorConfig, err := decodeConnectorConfig(cfg.idpSeedInfo) + if err != nil { + return fmt.Errorf("resolve connector: %w", err) + } + + log.Infof( + "resolved connector: type=%s, id=%s, name=%s", + connectorConfig.Type, + connectorConfig.ID, + connectorConfig.Name, + ) + + if err := migrateDB(cfg, mgmtConfig, connectorConfig); err != nil { + return err + } + + if cfg.skipConfig { + log.Info("skipping config generation (--skip-config)") + return nil + } + + return generateConfig(cfg, connectorConfig) +} + +// validateSchema opens the store and checks that all required tables and columns +// exist. If anything is missing, it returns a descriptive error telling the user +// to upgrade their management server. 
+func validateSchema(mgmtConfig *nbconfig.Config, dataDir string) error { + ctx := context.Background() + migStore, migEventStore, cleanup, err := openStores(ctx, mgmtConfig, dataDir) + if err != nil { + return err + } + defer cleanup() + + errs := migStore.CheckSchema(migration.RequiredSchema) + if len(errs) > 0 { + return fmt.Errorf("%s", formatSchemaErrors(errs)) + } + + if migEventStore != nil { + eventErrs := migEventStore.CheckSchema(migration.RequiredEventSchema) + if len(eventErrs) > 0 { + return fmt.Errorf("activity store schema check failed (upgrade management server first):\n%s", formatSchemaErrors(eventErrs)) + } + } + + log.Info("database schema check passed") + return nil +} + +// formatSchemaErrors returns a user-friendly message listing all missing schema +// elements and instructing the operator to upgrade. +func formatSchemaErrors(errs []migration.SchemaError) string { + var b strings.Builder + b.WriteString("database schema is incomplete — the following tables/columns are missing:\n") + for _, e := range errs { + fmt.Fprintf(&b, " - %s\n", e.String()) + } + b.WriteString("\nPlease start the NetBird management server (v0.66.4+) at least once so that automatic database migrations create the required schema, then re-run this tool.\n") + return b.String() +} + +// populateUserInfoFromIDP creates an IDP manager from the config, fetches all +// user data (email, name) from the external IDP, and updates the store for users +// that are missing this information. 
+func populateUserInfoFromIDP(cfg *migrationConfig, mgmtConfig *nbconfig.Config) error { + ctx := context.Background() + + if mgmtConfig.IdpManagerConfig == nil { + return fmt.Errorf("IdpManagerConfig is not set in management.json; cannot fetch user info from IDP") + } + + idpManager, err := idp.NewManager(ctx, *mgmtConfig.IdpManagerConfig, nil) + if err != nil { + return fmt.Errorf("create IDP manager: %w", err) + } + if idpManager == nil { + return fmt.Errorf("IDP manager type is 'none' or empty; cannot fetch user info") + } + + log.Infof("created IDP manager (type: %s)", mgmtConfig.IdpManagerConfig.ManagerType) + + migStore, _, cleanup, err := openStores(ctx, mgmtConfig, cfg.dataDir) + if err != nil { + return err + } + defer cleanup() + + srv := &migrationServer{store: migStore} + return migration.PopulateUserInfo(srv, idpManager, cfg.dryRun) +} + +// openStores opens the main and activity stores, returning migration-specific interfaces. +// The caller must call the returned cleanup function to close the stores. 
+func openStores(ctx context.Context, cfg *nbconfig.Config, dataDir string) (migration.Store, migration.EventStore, func(), error) { + engine := cfg.StoreConfig.Engine + if engine == "" { + engine = types.SqliteStoreEngine + } + + mainStore, err := store.NewStore(ctx, engine, dataDir, nil, true) + if err != nil { + return nil, nil, nil, fmt.Errorf("open main store: %w", err) + } + + if cfg.DataStoreEncryptionKey != "" { + fieldEncrypt, err := crypt.NewFieldEncrypt(cfg.DataStoreEncryptionKey) + if err != nil { + _ = mainStore.Close(ctx) + return nil, nil, nil, fmt.Errorf("init field encryption: %w", err) + } + mainStore.SetFieldEncrypt(fieldEncrypt) + } + + migStore, ok := mainStore.(migration.Store) + if !ok { + _ = mainStore.Close(ctx) + return nil, nil, nil, fmt.Errorf("store does not support migration operations (ListUsers/UpdateUserID)") + } + + cleanup := func() { _ = mainStore.Close(ctx) } + + var migEventStore migration.EventStore + actStore, err := activitystore.NewSqlStore(ctx, dataDir, cfg.DataStoreEncryptionKey) + if err != nil { + log.Warnf("could not open activity store (events.db may not exist): %v", err) + } else { + migEventStore = actStore + prevCleanup := cleanup + cleanup = func() { _ = actStore.Close(ctx); prevCleanup() } + } + + return migStore, migEventStore, cleanup, nil +} + +// migrateDB opens the stores, previews pending users, and runs the DB migration. 
+func migrateDB(cfg *migrationConfig, mgmtConfig *nbconfig.Config, connectorConfig *dex.Connector) error { + ctx := context.Background() + + migStore, migEventStore, cleanup, err := openStores(ctx, mgmtConfig, cfg.dataDir) + if err != nil { + return err + } + defer cleanup() + + pending, err := previewUsers(ctx, migStore) + if err != nil { + return err + } + + if cfg.dryRun { + if err := os.Setenv("NB_IDP_MIGRATION_DRY_RUN", "true"); err != nil { + return fmt.Errorf("set dry-run env: %w", err) + } + defer os.Unsetenv("NB_IDP_MIGRATION_DRY_RUN") //nolint:errcheck + } + + if !cfg.dryRun && !cfg.force { + if !confirmPrompt(pending) { + log.Info("migration cancelled by user") + return nil + } + } + + srv := &migrationServer{store: migStore, eventStore: migEventStore} + if err := migration.MigrateUsersToStaticConnectors(srv, connectorConfig); err != nil { + return fmt.Errorf("migrate users: %w", err) + } + + if !cfg.dryRun { + log.Info("DB migration completed successfully") + } + return nil +} + +// previewUsers counts pending vs already-migrated users and logs a summary. +// Returns the number of users still needing migration. +func previewUsers(ctx context.Context, migStore migration.Store) (int, error) { + users, err := migStore.ListUsers(ctx) + if err != nil { + return 0, fmt.Errorf("list users: %w", err) + } + + var pending, alreadyMigrated int + for _, u := range users { + if _, _, decErr := dex.DecodeDexUserID(u.Id); decErr == nil { + alreadyMigrated++ + } else { + pending++ + } + } + + log.Infof("found %d total users: %d pending migration, %d already migrated", len(users), pending, alreadyMigrated) + return pending, nil +} + +// confirmPrompt asks the user for interactive confirmation. Returns true if they accept. +func confirmPrompt(pending int) bool { + log.Infof("About to migrate %d users. This cannot be easily undone. Continue? 
[y/N] ", pending) + reader := bufio.NewReader(os.Stdin) + answer, _ := reader.ReadString('\n') + answer = strings.TrimSpace(strings.ToLower(answer)) + return answer == "y" || answer == "yes" +} + +// decodeConnectorConfig base64-decodes and JSON-unmarshals a connector. +func decodeConnectorConfig(encoded string) (*dex.Connector, error) { + decoded, err := base64.StdEncoding.DecodeString(encoded) + if err != nil { + return nil, fmt.Errorf("base64 decode: %w", err) + } + + var conn dex.Connector + if err := json.Unmarshal(decoded, &conn); err != nil { + return nil, fmt.Errorf("json unmarshal: %w", err) + } + + if conn.ID == "" { + return nil, fmt.Errorf("connector ID is empty") + } + + return &conn, nil +} + +// generateConfig reads the existing management.json as raw JSON, removes +// IdpManagerConfig, adds EmbeddedIdP, updates HttpConfig fields, and writes +// the result. In dry-run mode, it prints the new config to stdout instead. +func generateConfig(cfg *migrationConfig, connectorConfig *dex.Connector) error { + // Read existing config as raw JSON to preserve all fields + raw, err := os.ReadFile(cfg.configPath) + if err != nil { + return fmt.Errorf("read config file: %w", err) + } + + var configMap map[string]any + if err := json.Unmarshal(raw, &configMap); err != nil { + return fmt.Errorf("parse config JSON: %w", err) + } + + // Remove unused information + delete(configMap, "IdpManagerConfig") + delete(configMap, "PKCEAuthorizationFlow") + delete(configMap, "DeviceAuthorizationFlow") + + httpConfig, ok := configMap["HttpConfig"].(map[string]any) + if httpConfig != nil && ok { + certFilePath := httpConfig["CertFile"] + certKeyPath := httpConfig["CertKey"] + + delete(configMap, "HttpConfig") + + configMap["HttpConfig"] = map[string]any{ + "CertFile": certFilePath, + "CertKey": certKeyPath, + } + } + + // Ensure the connector's redirectURI points to the management server (Dex callback), + // not the external IdP. 
The auto-detection may have used the IdP issuer URL. + connConfig := make(map[string]any, len(connectorConfig.Config)) + maps.Copy(connConfig, connectorConfig.Config) + + redirectURI, err := buildURL(cfg.apiURL, "/oauth2/callback") + if err != nil { + return fmt.Errorf("build redirect URI: %w", err) + } + connConfig["redirectURI"] = redirectURI + + issuer, err := buildURL(cfg.apiURL, "/oauth2") + if err != nil { + return fmt.Errorf("build issuer URL: %w", err) + } + + dashboardRedirectURL, err := buildURL(cfg.dashboardURL, "/nb-auth") + if err != nil { + return fmt.Errorf("build dashboard redirect URL: %w", err) + } + + dashboardSilentRedirectURL, err := buildURL(cfg.dashboardURL, "/nb-silent-auth") + if err != nil { + return fmt.Errorf("build dashboard silent redirect URL: %w", err) + } + + // Add minimal EmbeddedIdP section + configMap["EmbeddedIdP"] = map[string]any{ + "Enabled": true, + "Issuer": issuer, + "DashboardRedirectURIs": []string{ + dashboardRedirectURL, + dashboardSilentRedirectURL, + }, + "StaticConnectors": []any{ + map[string]any{ + "type": connectorConfig.Type, + "name": connectorConfig.Name, + "id": connectorConfig.ID, + "config": connConfig, + }, + }, + } + + newJSON, err := json.MarshalIndent(configMap, "", " ") + if err != nil { + return fmt.Errorf("marshal new config: %w", err) + } + + if cfg.dryRun { + log.Info("[DRY RUN] new management.json would be:") + log.Infoln(string(newJSON)) + return nil + } + + // Backup original + backupPath := cfg.configPath + ".bak" + if err := os.WriteFile(backupPath, raw, 0o600); err != nil { + return fmt.Errorf("write backup: %w", err) + } + log.Infof("backed up original config to %s", backupPath) + + // Write new config + if err := os.WriteFile(cfg.configPath, newJSON, 0o600); err != nil { + return fmt.Errorf("write new config: %w", err) + } + log.Infof("wrote new config to %s", cfg.configPath) + + return nil +} + +func buildURL(uri, path string) (string, error) { + // Case for domain without scheme, e.g. 
"example.com" or "example.com:8080" + if !strings.HasPrefix(uri, "http://") && !strings.HasPrefix(uri, "https://") { + uri = "https://" + uri + } + + val, err := url.JoinPath(uri, path) + if err != nil { + return "", err + } + + return val, nil +} + +func printPostMigrationInstructions(cfg *migrationConfig) { + authAuthority, err := buildURL(cfg.apiURL, "/oauth2") + if err != nil { + authAuthority = "https:///oauth2" + } + + log.Info("Congratulations! You have successfully migrated your NetBird management server to the embedded Dex IdP.") + log.Info("Next steps:") + log.Info("1. Make sure the following environment variables are set for your dashboard server:") + log.Infof(` +AUTH_AUDIENCE=netbird-dashboard +AUTH_CLIENT_ID=netbird-dashboard +AUTH_AUTHORITY=%s +AUTH_SUPPORTED_SCOPES=openid profile email groups +AUTH_REDIRECT_URI=/nb-auth +AUTH_SILENT_REDIRECT_URI=/nb-silent-auth + `, + authAuthority, + ) + log.Info("2. Make sure you restart the dashboard & management servers to pick up the new config and environment variables.") + log.Info("eg. docker compose up -d --force-recreate management dashboard") + log.Info("3. Optional: If you have a reverse proxy configured, make sure the path `/oauth2/*` points to the management api server.") +} + +// Compile-time check that migrationServer implements migration.Server. +var _ migration.Server = (*migrationServer)(nil) diff --git a/tools/idp-migrate/main_test.go b/tools/idp-migrate/main_test.go new file mode 100644 index 000000000..75d0bd7eb --- /dev/null +++ b/tools/idp-migrate/main_test.go @@ -0,0 +1,487 @@ +package main + +import ( + "encoding/base64" + "encoding/json" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/idp/dex" + "github.com/netbirdio/netbird/management/server/idp/migration" +) + +// TestMigrationServerInterface is a compile-time check that migrationServer +// implements the migration.Server interface. 
+func TestMigrationServerInterface(t *testing.T) { + var _ migration.Server = (*migrationServer)(nil) +} + +func TestDecodeConnectorConfig(t *testing.T) { + conn := dex.Connector{ + Type: "oidc", + Name: "test", + ID: "test-id", + Config: map[string]any{ + "issuer": "https://example.com", + "clientID": "cid", + "clientSecret": "csecret", + }, + } + + data, err := json.Marshal(conn) + require.NoError(t, err) + encoded := base64.StdEncoding.EncodeToString(data) + + result, err := decodeConnectorConfig(encoded) + require.NoError(t, err) + assert.Equal(t, "test-id", result.ID) + assert.Equal(t, "oidc", result.Type) + assert.Equal(t, "https://example.com", result.Config["issuer"]) +} + +func TestDecodeConnectorConfig_InvalidBase64(t *testing.T) { + _, err := decodeConnectorConfig("not-valid-base64!!!") + require.Error(t, err) + assert.Contains(t, err.Error(), "base64 decode") +} + +func TestDecodeConnectorConfig_InvalidJSON(t *testing.T) { + encoded := base64.StdEncoding.EncodeToString([]byte("not json")) + _, err := decodeConnectorConfig(encoded) + require.Error(t, err) + assert.Contains(t, err.Error(), "json unmarshal") +} + +func TestDecodeConnectorConfig_EmptyConnectorID(t *testing.T) { + conn := dex.Connector{ + Type: "oidc", + Name: "no-id", + ID: "", + } + data, err := json.Marshal(conn) + require.NoError(t, err) + + encoded := base64.StdEncoding.EncodeToString(data) + _, err = decodeConnectorConfig(encoded) + require.Error(t, err) + assert.Contains(t, err.Error(), "connector ID is empty") +} + +func TestValidateConfig(t *testing.T) { + valid := &migrationConfig{ + configPath: "/etc/netbird/management.json", + dataDir: "/var/lib/netbird", + idpSeedInfo: "some-base64", + apiURL: "https://api.example.com", + dashboardURL: "https://dash.example.com", + } + + t.Run("valid config", func(t *testing.T) { + require.NoError(t, validateConfig(valid)) + }) + + t.Run("missing configPath", func(t *testing.T) { + cfg := *valid + cfg.configPath = "" + err := 
validateConfig(&cfg) + require.Error(t, err) + assert.Contains(t, err.Error(), "--config") + }) + + t.Run("missing dataDir", func(t *testing.T) { + cfg := *valid + cfg.dataDir = "" + err := validateConfig(&cfg) + require.Error(t, err) + assert.Contains(t, err.Error(), "--datadir") + }) + + t.Run("missing idpSeedInfo", func(t *testing.T) { + cfg := *valid + cfg.idpSeedInfo = "" + err := validateConfig(&cfg) + require.Error(t, err) + assert.Contains(t, err.Error(), "--idp-seed-info") + }) + + t.Run("missing apiUrl", func(t *testing.T) { + cfg := *valid + cfg.apiURL = "" + err := validateConfig(&cfg) + require.Error(t, err) + assert.Contains(t, err.Error(), "--api-domain") + }) + + t.Run("missing dashboardUrl", func(t *testing.T) { + cfg := *valid + cfg.dashboardURL = "" + err := validateConfig(&cfg) + require.Error(t, err) + assert.Contains(t, err.Error(), "--dashboard-domain") + }) +} + +func TestConfigFromArgs_EnvVarsApplied(t *testing.T) { + t.Run("env vars fill in for missing flags", func(t *testing.T) { + t.Setenv("NETBIRD_CONFIG_PATH", "/env/management.json") + t.Setenv("NETBIRD_DATA_DIR", "/env/data") + t.Setenv("NETBIRD_IDP_SEED_INFO", "env-seed") + t.Setenv("NETBIRD_API_URL", "https://api.env.com") + t.Setenv("NETBIRD_DASHBOARD_URL", "https://dash.env.com") + + cfg, err := configFromArgs([]string{}) + require.NoError(t, err) + + assert.Equal(t, "/env/management.json", cfg.configPath) + assert.Equal(t, "/env/data", cfg.dataDir) + assert.Equal(t, "env-seed", cfg.idpSeedInfo) + assert.Equal(t, "https://api.env.com", cfg.apiURL) + assert.Equal(t, "https://dash.env.com", cfg.dashboardURL) + }) + + t.Run("flags work without env vars", func(t *testing.T) { + cfg, err := configFromArgs([]string{ + "--config", "/flag/management.json", + "--datadir", "/flag/data", + "--idp-seed-info", "flag-seed", + "--api-url", "https://api.flag.com", + "--dashboard-url", "https://dash.flag.com", + }) + require.NoError(t, err) + + assert.Equal(t, "/flag/management.json", 
cfg.configPath) + assert.Equal(t, "/flag/data", cfg.dataDir) + assert.Equal(t, "flag-seed", cfg.idpSeedInfo) + assert.Equal(t, "https://api.flag.com", cfg.apiURL) + assert.Equal(t, "https://dash.flag.com", cfg.dashboardURL) + }) + + t.Run("env vars override flags", func(t *testing.T) { + t.Setenv("NETBIRD_CONFIG_PATH", "/env/management.json") + t.Setenv("NETBIRD_API_URL", "https://api.env.com") + + cfg, err := configFromArgs([]string{ + "--config", "/flag/management.json", + "--datadir", "/flag/data", + "--idp-seed-info", "flag-seed", + "--api-url", "https://api.flag.com", + "--dashboard-url", "https://dash.flag.com", + }) + require.NoError(t, err) + + assert.Equal(t, "/env/management.json", cfg.configPath, "env should override flag") + assert.Equal(t, "https://api.env.com", cfg.apiURL, "env should override flag") + assert.Equal(t, "https://dash.flag.com", cfg.dashboardURL, "flag preserved when no env override") + }) + + t.Run("--domain flag with specific env var override", func(t *testing.T) { + t.Setenv("NETBIRD_API_URL", "https://api.env.com") + + cfg, err := configFromArgs([]string{ + "--domain", "both.flag.com", + "--config", "/path", + "--datadir", "/data", + "--idp-seed-info", "seed", + }) + require.NoError(t, err) + + assert.Equal(t, "https://api.env.com", cfg.apiURL, "specific env beats --domain") + assert.Equal(t, "both.flag.com", cfg.dashboardURL, "--domain fills dashboard") + }) +} + +func TestApplyOverrides_MostGranularWins(t *testing.T) { + t.Run("specific flags beat --domain", func(t *testing.T) { + cfg := &migrationConfig{ + apiURL: "api.specific.com", + dashboardURL: "dash.specific.com", + } + applyOverrides(cfg, "broad.com") + + assert.Equal(t, "api.specific.com", cfg.apiURL) + assert.Equal(t, "dash.specific.com", cfg.dashboardURL) + }) + + t.Run("--domain fills blanks when specific flags missing", func(t *testing.T) { + cfg := &migrationConfig{} + applyOverrides(cfg, "broad.com") + + assert.Equal(t, "broad.com", cfg.apiURL) + assert.Equal(t, 
"broad.com", cfg.dashboardURL) + }) + + t.Run("--domain fills only the missing specific flag", func(t *testing.T) { + cfg := &migrationConfig{ + apiURL: "api.specific.com", + } + applyOverrides(cfg, "broad.com") + + assert.Equal(t, "api.specific.com", cfg.apiURL) + assert.Equal(t, "broad.com", cfg.dashboardURL) + }) + + t.Run("NETBIRD_DOMAIN overrides flags", func(t *testing.T) { + cfg := &migrationConfig{ + apiURL: "api.flag.com", + dashboardURL: "dash.flag.com", + } + t.Setenv("NETBIRD_DOMAIN", "env-broad.com") + + applyOverrides(cfg, "") + + assert.Equal(t, "env-broad.com", cfg.apiURL) + assert.Equal(t, "env-broad.com", cfg.dashboardURL) + }) + + t.Run("specific env vars beat NETBIRD_DOMAIN", func(t *testing.T) { + cfg := &migrationConfig{} + t.Setenv("NETBIRD_DOMAIN", "env-broad.com") + t.Setenv("NETBIRD_API_URL", "api.env-specific.com") + t.Setenv("NETBIRD_DASHBOARD_URL", "dash.env-specific.com") + + applyOverrides(cfg, "") + + assert.Equal(t, "api.env-specific.com", cfg.apiURL) + assert.Equal(t, "dash.env-specific.com", cfg.dashboardURL) + }) + + t.Run("one specific env var overrides only its field", func(t *testing.T) { + cfg := &migrationConfig{} + t.Setenv("NETBIRD_DOMAIN", "env-broad.com") + t.Setenv("NETBIRD_API_URL", "api.env-specific.com") + + applyOverrides(cfg, "") + + assert.Equal(t, "api.env-specific.com", cfg.apiURL) + assert.Equal(t, "env-broad.com", cfg.dashboardURL) + }) + + t.Run("specific env vars beat all flags combined", func(t *testing.T) { + cfg := &migrationConfig{ + apiURL: "api.flag.com", + dashboardURL: "dash.flag.com", + } + t.Setenv("NETBIRD_API_URL", "api.env.com") + t.Setenv("NETBIRD_DASHBOARD_URL", "dash.env.com") + + applyOverrides(cfg, "domain-flag.com") + + assert.Equal(t, "api.env.com", cfg.apiURL) + assert.Equal(t, "dash.env.com", cfg.dashboardURL) + }) + + t.Run("env vars override all non-domain flags", func(t *testing.T) { + cfg := &migrationConfig{ + configPath: "/flag/path", + dataDir: "/flag/data", + idpSeedInfo: 
"flag-seed", + dryRun: false, + force: false, + skipConfig: false, + skipPopulateUserInfo: false, + logLevel: "info", + } + t.Setenv("NETBIRD_CONFIG_PATH", "/env/path") + t.Setenv("NETBIRD_DATA_DIR", "/env/data") + t.Setenv("NETBIRD_IDP_SEED_INFO", "env-seed") + t.Setenv("NETBIRD_DRY_RUN", "true") + t.Setenv("NETBIRD_FORCE", "true") + t.Setenv("NETBIRD_SKIP_CONFIG", "true") + t.Setenv("NETBIRD_SKIP_POPULATE_USER_INFO", "true") + t.Setenv("NETBIRD_LOG_LEVEL", "debug") + + applyOverrides(cfg, "") + + assert.Equal(t, "/env/path", cfg.configPath) + assert.Equal(t, "/env/data", cfg.dataDir) + assert.Equal(t, "env-seed", cfg.idpSeedInfo) + assert.True(t, cfg.dryRun) + assert.True(t, cfg.force) + assert.True(t, cfg.skipConfig) + assert.True(t, cfg.skipPopulateUserInfo) + assert.Equal(t, "debug", cfg.logLevel) + }) + + t.Run("boolean env vars properly parse false values", func(t *testing.T) { + cfg := &migrationConfig{} + t.Setenv("NETBIRD_DRY_RUN", "false") + t.Setenv("NETBIRD_FORCE", "yes") + t.Setenv("NETBIRD_SKIP_CONFIG", "0") + + applyOverrides(cfg, "") + + assert.False(t, cfg.dryRun) + assert.False(t, cfg.force) + assert.False(t, cfg.skipConfig) + }) + + t.Run("unset env vars do not override flags", func(t *testing.T) { + cfg := &migrationConfig{ + configPath: "/flag/path", + dataDir: "/flag/data", + idpSeedInfo: "flag-seed", + dryRun: true, + logLevel: "warn", + } + + applyOverrides(cfg, "") + + assert.Equal(t, "/flag/path", cfg.configPath) + assert.Equal(t, "/flag/data", cfg.dataDir) + assert.Equal(t, "flag-seed", cfg.idpSeedInfo) + assert.True(t, cfg.dryRun) + assert.Equal(t, "warn", cfg.logLevel) + }) +} + +func TestBuildUrl(t *testing.T) { + tests := []struct { + name string + uri string + path string + expected string + }{ + {"with https scheme", "https://example.com", "/oauth2", "https://example.com/oauth2"}, + {"with http scheme", "http://example.com", "/oauth2/callback", "http://example.com/oauth2/callback"}, + {"bare domain", "example.com", "/oauth2", 
"https://example.com/oauth2"}, + {"domain with port", "example.com:8080", "/nb-auth", "https://example.com:8080/nb-auth"}, + {"trailing slash on uri", "https://example.com/", "/oauth2", "https://example.com/oauth2"}, + {"nested path", "https://example.com", "/oauth2/callback", "https://example.com/oauth2/callback"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + url, err := buildURL(tt.uri, tt.path) + assert.NoError(t, err) + assert.Equal(t, tt.expected, url) + }) + } +} + +func TestGenerateConfig(t *testing.T) { + t.Run("generates valid config", func(t *testing.T) { + dir := t.TempDir() + configPath := filepath.Join(dir, "management.json") + + originalConfig := `{ + "Datadir": "/var/lib/netbird", + "HttpConfig": { + "LetsEncryptDomain": "mgmt.example.com", + "CertFile": "/etc/ssl/cert.pem", + "CertKey": "/etc/ssl/key.pem", + "AuthIssuer": "https://zitadel.example.com/oauth2", + "AuthKeysLocation": "https://zitadel.example.com/oauth2/keys", + "OIDCConfigEndpoint": "https://zitadel.example.com/.well-known/openid-configuration", + "AuthClientID": "old-client-id", + "AuthUserIDClaim": "preferred_username" + }, + "IdpManagerConfig": { + "ManagerType": "zitadel", + "ClientConfig": { + "Issuer": "https://zitadel.example.com", + "ClientID": "zit-id", + "ClientSecret": "zit-secret" + } + } +}` + require.NoError(t, os.WriteFile(configPath, []byte(originalConfig), 0o600)) + + cfg := &migrationConfig{ + configPath: configPath, + dashboardURL: "https://mgmt.example.com", + apiURL: "https://mgmt.example.com", + } + conn := &dex.Connector{ + Type: "zitadel", + Name: "zitadel", + ID: "zitadel", + Config: map[string]any{ + "issuer": "https://zitadel.example.com", + "clientID": "zit-id", + "clientSecret": "zit-secret", + }, + } + + err := generateConfig(cfg, conn) + require.NoError(t, err) + + // Check backup was created + backupPath := configPath + ".bak" + backupData, err := os.ReadFile(backupPath) + require.NoError(t, err) + assert.Equal(t, 
originalConfig, string(backupData)) + + // Read and parse the new config + newData, err := os.ReadFile(configPath) + require.NoError(t, err) + + var result map[string]any + require.NoError(t, json.Unmarshal(newData, &result)) + + // IdpManagerConfig should be removed + _, hasOldIdp := result["IdpManagerConfig"] + assert.False(t, hasOldIdp, "IdpManagerConfig should be removed") + + _, hasPKCE := result["PKCEAuthorizationFlow"] + assert.False(t, hasPKCE, "PKCEAuthorizationFlow should be removed") + + // EmbeddedIdP should be present with minimal fields + embeddedIDP, ok := result["EmbeddedIdP"].(map[string]any) + require.True(t, ok, "EmbeddedIdP should be present") + assert.Equal(t, true, embeddedIDP["Enabled"]) + assert.Equal(t, "https://mgmt.example.com/oauth2", embeddedIDP["Issuer"]) + assert.Nil(t, embeddedIDP["LocalAuthDisabled"], "LocalAuthDisabled should not be set") + assert.Nil(t, embeddedIDP["SignKeyRefreshEnabled"], "SignKeyRefreshEnabled should not be set") + assert.Nil(t, embeddedIDP["CLIRedirectURIs"], "CLIRedirectURIs should not be set") + + // Static connector's redirectURI should use the management domain + connectors := embeddedIDP["StaticConnectors"].([]any) + require.Len(t, connectors, 1) + firstConn := connectors[0].(map[string]any) + connCfg := firstConn["config"].(map[string]any) + assert.Equal(t, "https://mgmt.example.com/oauth2/callback", connCfg["redirectURI"], + "redirectURI should be overridden to use the management domain") + + // HttpConfig should only have CertFile and CertKey + httpConfig, ok := result["HttpConfig"].(map[string]any) + require.True(t, ok, "HttpConfig should be present") + assert.Equal(t, "/etc/ssl/cert.pem", httpConfig["CertFile"]) + assert.Equal(t, "/etc/ssl/key.pem", httpConfig["CertKey"]) + assert.Nil(t, httpConfig["AuthIssuer"], "AuthIssuer should be stripped") + + // Datadir should be preserved + assert.Equal(t, "/var/lib/netbird", result["Datadir"]) + }) + + t.Run("dry run does not write files", func(t *testing.T) 
{ + dir := t.TempDir() + configPath := filepath.Join(dir, "management.json") + + originalConfig := `{"HttpConfig": {"CertFile": "", "CertKey": ""}}` + require.NoError(t, os.WriteFile(configPath, []byte(originalConfig), 0o600)) + + cfg := &migrationConfig{ + configPath: configPath, + dashboardURL: "https://mgmt.example.com", + apiURL: "https://mgmt.example.com", + dryRun: true, + } + conn := &dex.Connector{Type: "oidc", Name: "test", ID: "test"} + + err := generateConfig(cfg, conn) + require.NoError(t, err) + + // Original should be unchanged + data, err := os.ReadFile(configPath) + require.NoError(t, err) + assert.Equal(t, originalConfig, string(data)) + + // No backup should exist + _, err = os.Stat(configPath + ".bak") + assert.True(t, os.IsNotExist(err)) + }) +} diff --git a/upload-server/server/local.go b/upload-server/server/local.go index f12c472d2..f7ca50011 100644 --- a/upload-server/server/local.go +++ b/upload-server/server/local.go @@ -7,6 +7,7 @@ import ( "net/url" "os" "path/filepath" + "strings" log "github.com/sirupsen/logrus" @@ -82,15 +83,18 @@ func (l *local) getUploadURL(objectKey string) (string, error) { return newURL.String(), nil } +const maxUploadSize = 150 << 20 + func (l *local) handlePutRequest(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPut { http.Error(w, "method not allowed", http.StatusMethodNotAllowed) return } + r.Body = http.MaxBytesReader(w, r.Body, maxUploadSize) body, err := io.ReadAll(r.Body) if err != nil { - http.Error(w, fmt.Sprintf("failed to read body: %v", err), http.StatusInternalServerError) + http.Error(w, "request body too large or failed to read", http.StatusRequestEntityTooLarge) return } @@ -105,20 +109,47 @@ func (l *local) handlePutRequest(w http.ResponseWriter, r *http.Request) { return } - dirPath := filepath.Join(l.dir, uploadDir) - err = os.MkdirAll(dirPath, 0750) - if err != nil { + cleanBase := filepath.Clean(l.dir) + string(filepath.Separator) + + dirPath := 
filepath.Clean(filepath.Join(l.dir, uploadDir)) + if !strings.HasPrefix(dirPath, cleanBase) { + http.Error(w, "invalid path", http.StatusBadRequest) + log.Warnf("Path traversal attempt blocked (dir): %s", dirPath) + return + } + + filePath := filepath.Clean(filepath.Join(dirPath, uploadFile)) + if !strings.HasPrefix(filePath, cleanBase) { + http.Error(w, "invalid path", http.StatusBadRequest) + log.Warnf("Path traversal attempt blocked (file): %s", filePath) + return + } + + if err = os.MkdirAll(dirPath, 0750); err != nil { http.Error(w, "failed to create upload dir", http.StatusInternalServerError) log.Errorf("Failed to create upload dir: %v", err) return } - file := filepath.Join(dirPath, uploadFile) - if err := os.WriteFile(file, body, 0600); err != nil { - http.Error(w, "failed to write file", http.StatusInternalServerError) - log.Errorf("Failed to write file %s: %v", file, err) + flags := os.O_WRONLY | os.O_CREATE | os.O_EXCL + f, err := os.OpenFile(filePath, flags, 0600) + if err != nil { + if os.IsExist(err) { + http.Error(w, "file already exists", http.StatusConflict) + return + } + http.Error(w, "failed to create file", http.StatusInternalServerError) + log.Errorf("Failed to create file %s: %v", filePath, err) return } - log.Infof("Uploading file %s", file) + defer func() { _ = f.Close() }() + + if _, err = f.Write(body); err != nil { + http.Error(w, "failed to write file", http.StatusInternalServerError) + log.Errorf("Failed to write file %s: %v", filePath, err) + return + } + + log.Infof("Uploaded file %s", filePath) w.WriteHeader(http.StatusOK) } diff --git a/upload-server/server/local_test.go b/upload-server/server/local_test.go index bd8a87809..64b8fd228 100644 --- a/upload-server/server/local_test.go +++ b/upload-server/server/local_test.go @@ -63,3 +63,90 @@ func Test_LocalHandlePutRequest(t *testing.T) { require.NoError(t, err) require.Equal(t, fileContent, createdFileContent) } + +func Test_LocalHandlePutRequest_PathTraversal(t *testing.T) { + 
mockDir := t.TempDir() + mockURL := "http://localhost:8080" + t.Setenv("SERVER_URL", mockURL) + t.Setenv("STORE_DIR", mockDir) + + mux := http.NewServeMux() + err := configureLocalHandlers(mux) + require.NoError(t, err) + + fileContent := []byte("malicious content") + req := httptest.NewRequest(http.MethodPut, putURLPath+"/uploads/%2e%2e%2f%2e%2e%2fetc%2fpasswd", bytes.NewReader(fileContent)) + + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + require.Equal(t, http.StatusBadRequest, rec.Code) + + _, err = os.Stat(filepath.Join(mockDir, "..", "..", "etc", "passwd")) + require.True(t, os.IsNotExist(err), "traversal file should not exist") +} + +func Test_LocalHandlePutRequest_DirTraversal(t *testing.T) { + mockDir := t.TempDir() + t.Setenv("SERVER_URL", "http://localhost:8080") + t.Setenv("STORE_DIR", mockDir) + + l := &local{url: "http://localhost:8080", dir: mockDir} + + body := bytes.NewReader([]byte("bad")) + req := httptest.NewRequest(http.MethodPut, putURLPath+"/x/evil.txt", body) + req.SetPathValue("dir", "../../../tmp") + req.SetPathValue("file", "evil.txt") + + rec := httptest.NewRecorder() + l.handlePutRequest(rec, req) + + require.Equal(t, http.StatusBadRequest, rec.Code) + + _, err := os.Stat(filepath.Join("/tmp", "evil.txt")) + require.True(t, os.IsNotExist(err), "traversal file should not exist outside store dir") +} + +func Test_LocalHandlePutRequest_DuplicateFile(t *testing.T) { + mockDir := t.TempDir() + t.Setenv("SERVER_URL", "http://localhost:8080") + t.Setenv("STORE_DIR", mockDir) + + mux := http.NewServeMux() + err := configureLocalHandlers(mux) + require.NoError(t, err) + + req := httptest.NewRequest(http.MethodPut, putURLPath+"/dir/dup.txt", bytes.NewReader([]byte("first"))) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + require.Equal(t, http.StatusOK, rec.Code) + + req = httptest.NewRequest(http.MethodPut, putURLPath+"/dir/dup.txt", bytes.NewReader([]byte("second"))) + rec = httptest.NewRecorder() + mux.ServeHTTP(rec, 
req) + require.Equal(t, http.StatusConflict, rec.Code) + + content, err := os.ReadFile(filepath.Join(mockDir, "dir", "dup.txt")) + require.NoError(t, err) + require.Equal(t, []byte("first"), content) +} + +func Test_LocalHandlePutRequest_BodyTooLarge(t *testing.T) { + mockDir := t.TempDir() + t.Setenv("SERVER_URL", "http://localhost:8080") + t.Setenv("STORE_DIR", mockDir) + + mux := http.NewServeMux() + err := configureLocalHandlers(mux) + require.NoError(t, err) + + largeBody := make([]byte, maxUploadSize+1) + req := httptest.NewRequest(http.MethodPut, putURLPath+"/dir/big.txt", bytes.NewReader(largeBody)) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + require.Equal(t, http.StatusRequestEntityTooLarge, rec.Code) + + _, err = os.Stat(filepath.Join(mockDir, "dir", "big.txt")) + require.True(t, os.IsNotExist(err)) +} diff --git a/upload-server/server/s3_test.go b/upload-server/server/s3_test.go index 26b0ecd09..7ab1bb379 100644 --- a/upload-server/server/s3_test.go +++ b/upload-server/server/s3_test.go @@ -5,13 +5,12 @@ import ( "encoding/json" "net/http" "net/http/httptest" - "os" "runtime" "testing" "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/testcontainers/testcontainers-go" "github.com/testcontainers/testcontainers-go/wait" @@ -20,45 +19,55 @@ import ( ) func Test_S3HandlerGetUploadURL(t *testing.T) { - if runtime.GOOS != "linux" && os.Getenv("CI") == "true" { - t.Skip("Skipping test on non-Linux and CI environment due to docker dependency") - } - if runtime.GOOS == "windows" { - t.Skip("Skipping test on Windows due to potential docker dependency") + if runtime.GOOS != "linux" { + t.Skip("Skipping test on non-Linux due to docker dependency") } - awsEndpoint := "http://127.0.0.1:4566" awsRegion := "us-east-1" ctx := context.Background() - containerRequest := 
testcontainers.ContainerRequest{ - Image: "localstack/localstack:s3-latest", - ExposedPorts: []string{"4566:4566/tcp"}, - WaitingFor: wait.ForLog("Ready"), - } - c, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{ - ContainerRequest: containerRequest, - Started: true, + ContainerRequest: testcontainers.ContainerRequest{ + Image: "minio/minio:RELEASE.2025-04-22T22-12-26Z", + ExposedPorts: []string{"9000/tcp"}, + Env: map[string]string{ + "MINIO_ROOT_USER": "minioadmin", + "MINIO_ROOT_PASSWORD": "minioadmin", + }, + Cmd: []string{"server", "/data"}, + WaitingFor: wait.ForHTTP("/minio/health/ready").WithPort("9000"), + }, + Started: true, }) - if err != nil { - t.Error(err) - } - defer func(c testcontainers.Container, ctx context.Context) { + require.NoError(t, err) + t.Cleanup(func() { if err := c.Terminate(ctx); err != nil { t.Log(err) } - }(c, ctx) + }) + + mappedPort, err := c.MappedPort(ctx, "9000") + require.NoError(t, err) + + hostIP, err := c.Host(ctx) + require.NoError(t, err) + + awsEndpoint := "http://" + hostIP + ":" + mappedPort.Port() t.Setenv("AWS_REGION", awsRegion) t.Setenv("AWS_ENDPOINT_URL", awsEndpoint) - t.Setenv("AWS_ACCESS_KEY_ID", "test") - t.Setenv("AWS_SECRET_ACCESS_KEY", "test") + t.Setenv("AWS_ACCESS_KEY_ID", "minioadmin") + t.Setenv("AWS_SECRET_ACCESS_KEY", "minioadmin") + t.Setenv("AWS_CONFIG_FILE", "") + t.Setenv("AWS_SHARED_CREDENTIALS_FILE", "") + t.Setenv("AWS_PROFILE", "") - cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion(awsRegion), config.WithBaseEndpoint(awsEndpoint)) - if err != nil { - t.Error(err) - } + cfg, err := config.LoadDefaultConfig(ctx, + config.WithRegion(awsRegion), + config.WithBaseEndpoint(awsEndpoint), + config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider("minioadmin", "minioadmin", "")), + ) + require.NoError(t, err) client := s3.NewFromConfig(cfg, func(o *s3.Options) { o.UsePathStyle = true @@ -66,19 +75,16 @@ func Test_S3HandlerGetUploadURL(t 
*testing.T) { }) bucketName := "test" - if _, err := client.CreateBucket(ctx, &s3.CreateBucketInput{ + _, err = client.CreateBucket(ctx, &s3.CreateBucketInput{ Bucket: &bucketName, - }); err != nil { - t.Error(err) - } + }) + require.NoError(t, err) list, err := client.ListBuckets(ctx, &s3.ListBucketsInput{}) - if err != nil { - t.Error(err) - } + require.NoError(t, err) - assert.Equal(t, len(list.Buckets), 1) - assert.Equal(t, *list.Buckets[0].Name, bucketName) + require.Len(t, list.Buckets, 1) + require.Equal(t, bucketName, *list.Buckets[0].Name) t.Setenv(bucketVar, bucketName)