mirror of
https://github.com/netbirdio/netbird.git
synced 2026-04-18 00:06:38 +00:00
Compare commits
35 Commits
feature/ad
...
feature/ch
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
28303d7ded | ||
|
|
5585adce18 | ||
|
|
f884299823 | ||
|
|
15aa6bae1b | ||
|
|
11eb725ac8 | ||
|
|
a19611d8e0 | ||
|
|
30c02ab78c | ||
|
|
3acd86e346 | ||
|
|
5c20f13c48 | ||
|
|
e6587b071d | ||
|
|
85451ab4cd | ||
|
|
a7f3ba03eb | ||
|
|
4f0a3a77ad | ||
|
|
44655ca9b5 | ||
|
|
e601278117 | ||
|
|
8e7b016be2 | ||
|
|
9e01ea7aae | ||
|
|
cfc7ec8bb9 | ||
|
|
b3bbc0e5c6 | ||
|
|
d7c8e37ff4 | ||
|
|
05b66e73bc | ||
|
|
01ceedac89 | ||
|
|
403babd433 | ||
|
|
47133031e5 | ||
|
|
82da606886 | ||
|
|
bbe5ae2145 | ||
|
|
0b21498b39 | ||
|
|
0ca59535f1 | ||
|
|
59c77d0658 | ||
|
|
333e045099 | ||
|
|
c2c4d9d336 | ||
|
|
9a6a72e88e | ||
|
|
afe6d9fca4 | ||
|
|
ef82905526 | ||
|
|
d18747e846 |
14
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
14
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
blank_issues_enabled: true
|
||||
contact_links:
|
||||
- name: Community Support
|
||||
url: https://forum.netbird.io/
|
||||
about: Community support forum
|
||||
- name: Cloud Support
|
||||
url: https://docs.netbird.io/help/report-bug-issues
|
||||
about: Contact us for support
|
||||
- name: Client/Connection Troubleshooting
|
||||
url: https://docs.netbird.io/help/troubleshooting-client
|
||||
about: See our client troubleshooting guide for help addressing common issues
|
||||
- name: Self-host Troubleshooting
|
||||
url: https://docs.netbird.io/selfhosted/troubleshooting
|
||||
about: See our self-host troubleshooting guide for help addressing common issues
|
||||
2
.github/workflows/golangci-lint.yml
vendored
2
.github/workflows/golangci-lint.yml
vendored
@@ -19,7 +19,7 @@ jobs:
|
||||
- name: codespell
|
||||
uses: codespell-project/actions-codespell@v2
|
||||
with:
|
||||
ignore_words_list: erro,clienta,hastable,iif,groupd,testin,groupe,cros,ans,deriver
|
||||
ignore_words_list: erro,clienta,hastable,iif,groupd,testin,groupe,cros,ans,deriver,te
|
||||
skip: go.mod,go.sum,**/proxy/web/**
|
||||
golangci:
|
||||
strategy:
|
||||
|
||||
51
.github/workflows/pr-title-check.yml
vendored
Normal file
51
.github/workflows/pr-title-check.yml
vendored
Normal file
@@ -0,0 +1,51 @@
|
||||
name: PR Title Check
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [opened, edited, synchronize, reopened]
|
||||
|
||||
jobs:
|
||||
check-title:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Validate PR title prefix
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
const title = context.payload.pull_request.title;
|
||||
const allowedTags = [
|
||||
'management',
|
||||
'client',
|
||||
'signal',
|
||||
'proxy',
|
||||
'relay',
|
||||
'misc',
|
||||
'infrastructure',
|
||||
'self-hosted',
|
||||
'doc',
|
||||
];
|
||||
|
||||
const pattern = /^\[([^\]]+)\]\s+.+/;
|
||||
const match = title.match(pattern);
|
||||
|
||||
if (!match) {
|
||||
core.setFailed(
|
||||
`PR title must start with a tag in brackets.\n` +
|
||||
`Example: [client] fix something\n` +
|
||||
`Allowed tags: ${allowedTags.join(', ')}`
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
const tags = match[1].split(',').map(t => t.trim().toLowerCase());
|
||||
|
||||
const invalid = tags.filter(t => !allowedTags.includes(t));
|
||||
if (invalid.length > 0) {
|
||||
core.setFailed(
|
||||
`Invalid tag(s): ${invalid.join(', ')}\n` +
|
||||
`Allowed tags: ${allowedTags.join(', ')}`
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
console.log(`Valid PR title tags: [${tags.join(', ')}]`);
|
||||
@@ -4,7 +4,7 @@
|
||||
# sudo podman build -t localhost/netbird:latest -f client/Dockerfile --ignorefile .dockerignore-client .
|
||||
# sudo podman run --rm -it --cap-add={BPF,NET_ADMIN,NET_RAW} localhost/netbird:latest
|
||||
|
||||
FROM alpine:3.23.2
|
||||
FROM alpine:3.23.3
|
||||
# iproute2: busybox doesn't display ip rules properly
|
||||
RUN apk add --no-cache \
|
||||
bash \
|
||||
|
||||
@@ -22,6 +22,7 @@ import (
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
|
||||
daddr "github.com/netbirdio/netbird/client/internal/daemonaddr"
|
||||
"github.com/netbirdio/netbird/client/internal/profilemanager"
|
||||
)
|
||||
|
||||
@@ -80,6 +81,15 @@ var (
|
||||
Short: "",
|
||||
Long: "",
|
||||
SilenceUsage: true,
|
||||
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
SetFlagsFromEnvVars(cmd.Root())
|
||||
|
||||
// Don't resolve for service commands — they create the socket, not connect to it.
|
||||
if !isServiceCmd(cmd) {
|
||||
daemonAddr = daddr.ResolveUnixDaemonAddr(daemonAddr)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
@@ -386,7 +396,6 @@ func migrateToNetbird(oldPath, newPath string) bool {
|
||||
}
|
||||
|
||||
func getClient(cmd *cobra.Command) (*grpc.ClientConn, error) {
|
||||
SetFlagsFromEnvVars(rootCmd)
|
||||
cmd.SetOut(cmd.OutOrStdout())
|
||||
|
||||
conn, err := DialClientGRPCServer(cmd.Context(), daemonAddr)
|
||||
@@ -399,3 +408,13 @@ func getClient(cmd *cobra.Command) (*grpc.ClientConn, error) {
|
||||
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// isServiceCmd returns true if cmd is the "service" command or a child of it.
|
||||
func isServiceCmd(cmd *cobra.Command) bool {
|
||||
for c := cmd; c != nil; c = c.Parent() {
|
||||
if c.Name() == "service" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -331,8 +331,11 @@ func (c *ConnectClient) run(mobileDependency MobileDependency, runningChan chan
|
||||
state.Set(StatusConnected)
|
||||
|
||||
if runningChan != nil {
|
||||
close(runningChan)
|
||||
runningChan = nil
|
||||
select {
|
||||
case <-runningChan:
|
||||
default:
|
||||
close(runningChan)
|
||||
}
|
||||
}
|
||||
|
||||
<-engineCtx.Done()
|
||||
|
||||
60
client/internal/daemonaddr/resolve.go
Normal file
60
client/internal/daemonaddr/resolve.go
Normal file
@@ -0,0 +1,60 @@
|
||||
//go:build !windows && !ios && !android
|
||||
|
||||
package daemonaddr
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var scanDir = "/var/run/netbird"
|
||||
|
||||
// setScanDir overrides the scan directory (used by tests).
|
||||
func setScanDir(dir string) {
|
||||
scanDir = dir
|
||||
}
|
||||
|
||||
// ResolveUnixDaemonAddr checks whether the default Unix socket exists and, if not,
|
||||
// scans /var/run/netbird/ for a single .sock file to use instead. This handles the
|
||||
// mismatch between the netbird@.service template (which places the socket under
|
||||
// /var/run/netbird/<instance>.sock) and the CLI default (/var/run/netbird.sock).
|
||||
func ResolveUnixDaemonAddr(addr string) string {
|
||||
if !strings.HasPrefix(addr, "unix://") {
|
||||
return addr
|
||||
}
|
||||
|
||||
sockPath := strings.TrimPrefix(addr, "unix://")
|
||||
if _, err := os.Stat(sockPath); err == nil {
|
||||
return addr
|
||||
}
|
||||
|
||||
entries, err := os.ReadDir(scanDir)
|
||||
if err != nil {
|
||||
return addr
|
||||
}
|
||||
|
||||
var found []string
|
||||
for _, e := range entries {
|
||||
if e.IsDir() {
|
||||
continue
|
||||
}
|
||||
if strings.HasSuffix(e.Name(), ".sock") {
|
||||
found = append(found, filepath.Join(scanDir, e.Name()))
|
||||
}
|
||||
}
|
||||
|
||||
switch len(found) {
|
||||
case 1:
|
||||
resolved := "unix://" + found[0]
|
||||
log.Debugf("Default daemon socket not found, using discovered socket: %s", resolved)
|
||||
return resolved
|
||||
case 0:
|
||||
return addr
|
||||
default:
|
||||
log.Warnf("Default daemon socket not found and multiple sockets discovered in %s; pass --daemon-addr explicitly", scanDir)
|
||||
return addr
|
||||
}
|
||||
}
|
||||
8
client/internal/daemonaddr/resolve_stub.go
Normal file
8
client/internal/daemonaddr/resolve_stub.go
Normal file
@@ -0,0 +1,8 @@
|
||||
//go:build windows || ios || android
|
||||
|
||||
package daemonaddr
|
||||
|
||||
// ResolveUnixDaemonAddr is a no-op on platforms that don't use Unix sockets.
|
||||
func ResolveUnixDaemonAddr(addr string) string {
|
||||
return addr
|
||||
}
|
||||
121
client/internal/daemonaddr/resolve_test.go
Normal file
121
client/internal/daemonaddr/resolve_test.go
Normal file
@@ -0,0 +1,121 @@
|
||||
//go:build !windows && !ios && !android
|
||||
|
||||
package daemonaddr
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// createSockFile creates a regular file with a .sock extension.
|
||||
// ResolveUnixDaemonAddr uses os.Stat (not net.Dial), so a regular file is
|
||||
// sufficient and avoids Unix socket path-length limits on macOS.
|
||||
func createSockFile(t *testing.T, path string) {
|
||||
t.Helper()
|
||||
if err := os.WriteFile(path, nil, 0o600); err != nil {
|
||||
t.Fatalf("failed to create test sock file at %s: %v", path, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveUnixDaemonAddr_DefaultExists(t *testing.T) {
|
||||
tmp := t.TempDir()
|
||||
sock := filepath.Join(tmp, "netbird.sock")
|
||||
createSockFile(t, sock)
|
||||
|
||||
addr := "unix://" + sock
|
||||
got := ResolveUnixDaemonAddr(addr)
|
||||
if got != addr {
|
||||
t.Errorf("expected %s, got %s", addr, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveUnixDaemonAddr_SingleDiscovered(t *testing.T) {
|
||||
tmp := t.TempDir()
|
||||
|
||||
// Default socket does not exist
|
||||
defaultAddr := "unix://" + filepath.Join(tmp, "netbird.sock")
|
||||
|
||||
// Create a scan dir with one socket
|
||||
sd := filepath.Join(tmp, "netbird")
|
||||
if err := os.MkdirAll(sd, 0o755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
instanceSock := filepath.Join(sd, "main.sock")
|
||||
createSockFile(t, instanceSock)
|
||||
|
||||
origScanDir := scanDir
|
||||
setScanDir(sd)
|
||||
t.Cleanup(func() { setScanDir(origScanDir) })
|
||||
|
||||
got := ResolveUnixDaemonAddr(defaultAddr)
|
||||
expected := "unix://" + instanceSock
|
||||
if got != expected {
|
||||
t.Errorf("expected %s, got %s", expected, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveUnixDaemonAddr_MultipleDiscovered(t *testing.T) {
|
||||
tmp := t.TempDir()
|
||||
|
||||
defaultAddr := "unix://" + filepath.Join(tmp, "netbird.sock")
|
||||
|
||||
sd := filepath.Join(tmp, "netbird")
|
||||
if err := os.MkdirAll(sd, 0o755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
createSockFile(t, filepath.Join(sd, "main.sock"))
|
||||
createSockFile(t, filepath.Join(sd, "other.sock"))
|
||||
|
||||
origScanDir := scanDir
|
||||
setScanDir(sd)
|
||||
t.Cleanup(func() { setScanDir(origScanDir) })
|
||||
|
||||
got := ResolveUnixDaemonAddr(defaultAddr)
|
||||
if got != defaultAddr {
|
||||
t.Errorf("expected original %s, got %s", defaultAddr, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveUnixDaemonAddr_NoSocketsFound(t *testing.T) {
|
||||
tmp := t.TempDir()
|
||||
|
||||
defaultAddr := "unix://" + filepath.Join(tmp, "netbird.sock")
|
||||
|
||||
sd := filepath.Join(tmp, "netbird")
|
||||
if err := os.MkdirAll(sd, 0o755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
origScanDir := scanDir
|
||||
setScanDir(sd)
|
||||
t.Cleanup(func() { setScanDir(origScanDir) })
|
||||
|
||||
got := ResolveUnixDaemonAddr(defaultAddr)
|
||||
if got != defaultAddr {
|
||||
t.Errorf("expected original %s, got %s", defaultAddr, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveUnixDaemonAddr_NonUnixAddr(t *testing.T) {
|
||||
addr := "tcp://127.0.0.1:41731"
|
||||
got := ResolveUnixDaemonAddr(addr)
|
||||
if got != addr {
|
||||
t.Errorf("expected %s, got %s", addr, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveUnixDaemonAddr_ScanDirMissing(t *testing.T) {
|
||||
tmp := t.TempDir()
|
||||
|
||||
defaultAddr := "unix://" + filepath.Join(tmp, "netbird.sock")
|
||||
|
||||
origScanDir := scanDir
|
||||
setScanDir(filepath.Join(tmp, "nonexistent"))
|
||||
t.Cleanup(func() { setScanDir(origScanDir) })
|
||||
|
||||
got := ResolveUnixDaemonAddr(defaultAddr)
|
||||
if got != defaultAddr {
|
||||
t.Errorf("expected original %s, got %s", defaultAddr, got)
|
||||
}
|
||||
}
|
||||
@@ -376,9 +376,9 @@ func (m *Resolver) extractDomainsFromServerDomains(serverDomains dnsconfig.Serve
|
||||
}
|
||||
}
|
||||
|
||||
if serverDomains.Flow != "" {
|
||||
domains = append(domains, serverDomains.Flow)
|
||||
}
|
||||
// Flow receiver domain is intentionally excluded from caching.
|
||||
// Cloud providers may rotate the IP behind this domain; a stale cached record
|
||||
// causes TLS certificate verification failures on reconnect.
|
||||
|
||||
for _, stun := range serverDomains.Stuns {
|
||||
if stun != "" {
|
||||
|
||||
@@ -391,7 +391,8 @@ func TestResolver_PartialUpdateAddsNewTypePreservesExisting(t *testing.T) {
|
||||
}
|
||||
assert.Len(t, resolver.GetCachedDomains(), 3)
|
||||
|
||||
// Update with partial ServerDomains (only flow domain - new type, should preserve all existing)
|
||||
// Update with partial ServerDomains (only flow domain - flow is intentionally excluded from
|
||||
// caching to prevent TLS failures from stale records, so all existing domains are preserved)
|
||||
partialDomains := dnsconfig.ServerDomains{
|
||||
Flow: "github.com",
|
||||
}
|
||||
@@ -400,10 +401,10 @@ func TestResolver_PartialUpdateAddsNewTypePreservesExisting(t *testing.T) {
|
||||
t.Skipf("Skipping test due to DNS resolution failure: %v", err)
|
||||
}
|
||||
|
||||
assert.Len(t, removedDomains, 0, "Should not remove any domains when adding new type")
|
||||
assert.Len(t, removedDomains, 0, "Should not remove any domains when only flow domain is provided")
|
||||
|
||||
finalDomains := resolver.GetCachedDomains()
|
||||
assert.Len(t, finalDomains, 4, "Should have all original domains plus new flow domain")
|
||||
assert.Len(t, finalDomains, 3, "Flow domain is not cached; all original domains should be preserved")
|
||||
|
||||
domainStrings := make([]string, len(finalDomains))
|
||||
for i, d := range finalDomains {
|
||||
@@ -412,5 +413,5 @@ func TestResolver_PartialUpdateAddsNewTypePreservesExisting(t *testing.T) {
|
||||
assert.Contains(t, domainStrings, "example.org")
|
||||
assert.Contains(t, domainStrings, "google.com")
|
||||
assert.Contains(t, domainStrings, "cloudflare.com")
|
||||
assert.Contains(t, domainStrings, "github.com")
|
||||
assert.NotContains(t, domainStrings, "github.com")
|
||||
}
|
||||
|
||||
@@ -198,7 +198,7 @@ func getConfigDirForUser(username string) (string, error) {
|
||||
|
||||
configDir := filepath.Join(DefaultConfigPathDir, username)
|
||||
if _, err := os.Stat(configDir); os.IsNotExist(err) {
|
||||
if err := os.MkdirAll(configDir, 0600); err != nil {
|
||||
if err := os.MkdirAll(configDir, 0700); err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
@@ -206,9 +206,15 @@ func getConfigDirForUser(username string) (string, error) {
|
||||
return configDir, nil
|
||||
}
|
||||
|
||||
func fileExists(path string) bool {
|
||||
func fileExists(path string) (bool, error) {
|
||||
_, err := os.Stat(path)
|
||||
return !os.IsNotExist(err)
|
||||
if err == nil {
|
||||
return true, nil
|
||||
}
|
||||
if os.IsNotExist(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
// createNewConfig creates a new config generating a new Wireguard key and saving to file
|
||||
@@ -635,7 +641,11 @@ func isPreSharedKeyHidden(preSharedKey *string) bool {
|
||||
|
||||
// UpdateConfig update existing configuration according to input configuration and return with the configuration
|
||||
func UpdateConfig(input ConfigInput) (*Config, error) {
|
||||
if !fileExists(input.ConfigPath) {
|
||||
configExists, err := fileExists(input.ConfigPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to check if config file exists: %w", err)
|
||||
}
|
||||
if !configExists {
|
||||
return nil, fmt.Errorf("config file %s does not exist", input.ConfigPath)
|
||||
}
|
||||
|
||||
@@ -644,7 +654,11 @@ func UpdateConfig(input ConfigInput) (*Config, error) {
|
||||
|
||||
// UpdateOrCreateConfig reads existing config or generates a new one
|
||||
func UpdateOrCreateConfig(input ConfigInput) (*Config, error) {
|
||||
if !fileExists(input.ConfigPath) {
|
||||
configExists, err := fileExists(input.ConfigPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to check if config file exists: %w", err)
|
||||
}
|
||||
if !configExists {
|
||||
log.Infof("generating new config %s", input.ConfigPath)
|
||||
cfg, err := createNewConfig(input)
|
||||
if err != nil {
|
||||
@@ -657,7 +671,7 @@ func UpdateOrCreateConfig(input ConfigInput) (*Config, error) {
|
||||
if isPreSharedKeyHidden(input.PreSharedKey) {
|
||||
input.PreSharedKey = nil
|
||||
}
|
||||
err := util.EnforcePermission(input.ConfigPath)
|
||||
err = util.EnforcePermission(input.ConfigPath)
|
||||
if err != nil {
|
||||
log.Errorf("failed to enforce permission on config dir: %v", err)
|
||||
}
|
||||
@@ -784,7 +798,12 @@ func ReadConfig(configPath string) (*Config, error) {
|
||||
|
||||
// ReadConfig read config file and return with Config. If it is not exists create a new with default values
|
||||
func readConfig(configPath string, createIfMissing bool) (*Config, error) {
|
||||
if fileExists(configPath) {
|
||||
configExists, err := fileExists(configPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to check if config file exists: %w", err)
|
||||
}
|
||||
|
||||
if configExists {
|
||||
err := util.EnforcePermission(configPath)
|
||||
if err != nil {
|
||||
log.Errorf("failed to enforce permission on config dir: %v", err)
|
||||
@@ -831,7 +850,11 @@ func DirectWriteOutConfig(path string, config *Config) error {
|
||||
// DirectUpdateOrCreateConfig is like UpdateOrCreateConfig but uses direct (non-atomic) writes.
|
||||
// Use this on platforms where atomic writes are blocked (e.g., tvOS sandbox).
|
||||
func DirectUpdateOrCreateConfig(input ConfigInput) (*Config, error) {
|
||||
if !fileExists(input.ConfigPath) {
|
||||
configExists, err := fileExists(input.ConfigPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to check if config file exists: %w", err)
|
||||
}
|
||||
if !configExists {
|
||||
log.Infof("generating new config %s", input.ConfigPath)
|
||||
cfg, err := createNewConfig(input)
|
||||
if err != nil {
|
||||
|
||||
@@ -256,7 +256,11 @@ func (s *ServiceManager) AddProfile(profileName, username string) error {
|
||||
}
|
||||
|
||||
profPath := filepath.Join(configDir, profileName+".json")
|
||||
if fileExists(profPath) {
|
||||
profileExists, err := fileExists(profPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check if profile exists: %w", err)
|
||||
}
|
||||
if profileExists {
|
||||
return ErrProfileAlreadyExists
|
||||
}
|
||||
|
||||
@@ -285,7 +289,11 @@ func (s *ServiceManager) RemoveProfile(profileName, username string) error {
|
||||
return fmt.Errorf("cannot remove profile with reserved name: %s", defaultProfileName)
|
||||
}
|
||||
profPath := filepath.Join(configDir, profileName+".json")
|
||||
if !fileExists(profPath) {
|
||||
profileExists, err := fileExists(profPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check if profile exists: %w", err)
|
||||
}
|
||||
if !profileExists {
|
||||
return ErrProfileNotFound
|
||||
}
|
||||
|
||||
|
||||
@@ -20,7 +20,11 @@ func (pm *ProfileManager) GetProfileState(profileName string) (*ProfileState, er
|
||||
}
|
||||
|
||||
stateFile := filepath.Join(configDir, profileName+".state.json")
|
||||
if !fileExists(stateFile) {
|
||||
stateFileExists, err := fileExists(stateFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to check if profile state file exists: %w", err)
|
||||
}
|
||||
if !stateFileExists {
|
||||
return nil, errors.New("profile state file does not exist")
|
||||
}
|
||||
|
||||
|
||||
@@ -263,8 +263,14 @@ func (w *Watcher) watchPeerStatusChanges(ctx context.Context, peerKey string, pe
|
||||
case <-closer:
|
||||
return
|
||||
case routerStates := <-subscription.Events():
|
||||
peerStateUpdate <- routerStates
|
||||
log.Debugf("triggered route state update for Peer: %s", peerKey)
|
||||
select {
|
||||
case peerStateUpdate <- routerStates:
|
||||
log.Debugf("triggered route state update for Peer: %s", peerKey)
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-closer:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -641,8 +641,6 @@ func (s *Server) Up(callerCtx context.Context, msg *proto.UpRequest) (*proto.UpR
|
||||
|
||||
return s.waitForUp(callerCtx)
|
||||
}
|
||||
defer s.mutex.Unlock()
|
||||
|
||||
if err := restoreResidualState(callerCtx, s.profileManager.GetStatePath()); err != nil {
|
||||
log.Warnf(errRestoreResidualState, err)
|
||||
}
|
||||
@@ -654,10 +652,12 @@ func (s *Server) Up(callerCtx context.Context, msg *proto.UpRequest) (*proto.UpR
|
||||
// not in the progress or already successfully established connection.
|
||||
status, err := state.Status()
|
||||
if err != nil {
|
||||
s.mutex.Unlock()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if status != internal.StatusIdle {
|
||||
s.mutex.Unlock()
|
||||
return nil, fmt.Errorf("up already in progress: current status %s", status)
|
||||
}
|
||||
|
||||
@@ -674,17 +674,20 @@ func (s *Server) Up(callerCtx context.Context, msg *proto.UpRequest) (*proto.UpR
|
||||
s.actCancel = cancel
|
||||
|
||||
if s.config == nil {
|
||||
s.mutex.Unlock()
|
||||
return nil, fmt.Errorf("config is not defined, please call login command first")
|
||||
}
|
||||
|
||||
activeProf, err := s.profileManager.GetActiveProfileState()
|
||||
if err != nil {
|
||||
s.mutex.Unlock()
|
||||
log.Errorf("failed to get active profile state: %v", err)
|
||||
return nil, fmt.Errorf("failed to get active profile state: %w", err)
|
||||
}
|
||||
|
||||
if msg != nil && msg.ProfileName != nil {
|
||||
if err := s.switchProfileIfNeeded(*msg.ProfileName, msg.Username, activeProf); err != nil {
|
||||
s.mutex.Unlock()
|
||||
log.Errorf("failed to switch profile: %v", err)
|
||||
return nil, fmt.Errorf("failed to switch profile: %w", err)
|
||||
}
|
||||
@@ -692,6 +695,7 @@ func (s *Server) Up(callerCtx context.Context, msg *proto.UpRequest) (*proto.UpR
|
||||
|
||||
activeProf, err = s.profileManager.GetActiveProfileState()
|
||||
if err != nil {
|
||||
s.mutex.Unlock()
|
||||
log.Errorf("failed to get active profile state: %v", err)
|
||||
return nil, fmt.Errorf("failed to get active profile state: %w", err)
|
||||
}
|
||||
@@ -700,6 +704,7 @@ func (s *Server) Up(callerCtx context.Context, msg *proto.UpRequest) (*proto.UpR
|
||||
|
||||
config, _, err := s.getConfig(activeProf)
|
||||
if err != nil {
|
||||
s.mutex.Unlock()
|
||||
log.Errorf("failed to get active profile config: %v", err)
|
||||
return nil, fmt.Errorf("failed to get active profile config: %w", err)
|
||||
}
|
||||
@@ -718,6 +723,7 @@ func (s *Server) Up(callerCtx context.Context, msg *proto.UpRequest) (*proto.UpR
|
||||
}
|
||||
go s.connectWithRetryRuns(ctx, s.config, s.statusRecorder, doAutoUpdate, s.clientRunningChan, s.clientGiveUpChan)
|
||||
|
||||
s.mutex.Unlock()
|
||||
return s.waitForUp(callerCtx)
|
||||
}
|
||||
|
||||
@@ -843,14 +849,26 @@ func (s *Server) cleanupConnection() error {
|
||||
if s.actCancel == nil {
|
||||
return ErrServiceNotUp
|
||||
}
|
||||
|
||||
// Capture the engine reference before cancelling the context.
|
||||
// After actCancel(), the connectWithRetryRuns goroutine wakes up
|
||||
// and sets connectClient.engine = nil, causing connectClient.Stop()
|
||||
// to skip the engine shutdown entirely.
|
||||
var engine *internal.Engine
|
||||
if s.connectClient != nil {
|
||||
engine = s.connectClient.Engine()
|
||||
}
|
||||
|
||||
s.actCancel()
|
||||
|
||||
if s.connectClient == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := s.connectClient.Stop(); err != nil {
|
||||
return err
|
||||
if engine != nil {
|
||||
if err := engine.Stop(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
s.connectClient = nil
|
||||
@@ -1607,9 +1625,14 @@ func (s *Server) GetFeatures(ctx context.Context, msg *proto.GetFeaturesRequest)
|
||||
|
||||
func (s *Server) connect(ctx context.Context, config *profilemanager.Config, statusRecorder *peer.Status, doInitialAutoUpdate bool, runningChan chan struct{}) error {
|
||||
log.Tracef("running client connection")
|
||||
s.connectClient = internal.NewConnectClient(ctx, config, statusRecorder, doInitialAutoUpdate)
|
||||
s.connectClient.SetSyncResponsePersistence(s.persistSyncResponse)
|
||||
if err := s.connectClient.Run(runningChan, s.logFile); err != nil {
|
||||
client := internal.NewConnectClient(ctx, config, statusRecorder, doInitialAutoUpdate)
|
||||
client.SetSyncResponsePersistence(s.persistSyncResponse)
|
||||
|
||||
s.mutex.Lock()
|
||||
s.connectClient = client
|
||||
s.mutex.Unlock()
|
||||
|
||||
if err := client.Run(runningChan, s.logFile); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
||||
187
client/server/server_connect_test.go
Normal file
187
client/server/server_connect_test.go
Normal file
@@ -0,0 +1,187 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/netbirdio/netbird/client/internal"
|
||||
"github.com/netbirdio/netbird/client/internal/peer"
|
||||
"github.com/netbirdio/netbird/client/proto"
|
||||
)
|
||||
|
||||
func newTestServer() *Server {
|
||||
return &Server{
|
||||
rootCtx: context.Background(),
|
||||
statusRecorder: peer.NewRecorder(""),
|
||||
}
|
||||
}
|
||||
|
||||
func newDummyConnectClient(ctx context.Context) *internal.ConnectClient {
|
||||
return internal.NewConnectClient(ctx, nil, nil, false)
|
||||
}
|
||||
|
||||
// TestConnectSetsClientWithMutex validates that connect() sets s.connectClient
|
||||
// under mutex protection so concurrent readers see a consistent value.
|
||||
func TestConnectSetsClientWithMutex(t *testing.T) {
|
||||
s := newTestServer()
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
// Manually simulate what connect() does (without calling Run which panics without full setup)
|
||||
client := newDummyConnectClient(ctx)
|
||||
|
||||
s.mutex.Lock()
|
||||
s.connectClient = client
|
||||
s.mutex.Unlock()
|
||||
|
||||
// Verify the assignment is visible under mutex
|
||||
s.mutex.Lock()
|
||||
assert.Equal(t, client, s.connectClient, "connectClient should be set")
|
||||
s.mutex.Unlock()
|
||||
}
|
||||
|
||||
// TestConcurrentConnectClientAccess validates that concurrent reads of
|
||||
// s.connectClient under mutex don't race with a write.
|
||||
func TestConcurrentConnectClientAccess(t *testing.T) {
|
||||
s := newTestServer()
|
||||
ctx := context.Background()
|
||||
client := newDummyConnectClient(ctx)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
nilCount := 0
|
||||
setCount := 0
|
||||
var mu sync.Mutex
|
||||
|
||||
// Start readers
|
||||
for i := 0; i < 50; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
s.mutex.Lock()
|
||||
c := s.connectClient
|
||||
s.mutex.Unlock()
|
||||
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
if c == nil {
|
||||
nilCount++
|
||||
} else {
|
||||
setCount++
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Simulate connect() writing under mutex
|
||||
time.Sleep(5 * time.Millisecond)
|
||||
s.mutex.Lock()
|
||||
s.connectClient = client
|
||||
s.mutex.Unlock()
|
||||
|
||||
wg.Wait()
|
||||
|
||||
assert.Equal(t, 50, nilCount+setCount, "all goroutines should complete without panic")
|
||||
}
|
||||
|
||||
// TestCleanupConnection_ClearsConnectClient validates that cleanupConnection
|
||||
// properly nils out connectClient.
|
||||
func TestCleanupConnection_ClearsConnectClient(t *testing.T) {
|
||||
s := newTestServer()
|
||||
_, cancel := context.WithCancel(context.Background())
|
||||
s.actCancel = cancel
|
||||
|
||||
s.connectClient = newDummyConnectClient(context.Background())
|
||||
s.clientRunning = true
|
||||
|
||||
err := s.cleanupConnection()
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Nil(t, s.connectClient, "connectClient should be nil after cleanup")
|
||||
}
|
||||
|
||||
// TestCleanState_NilConnectClient validates that CleanState doesn't panic
|
||||
// when connectClient is nil.
|
||||
func TestCleanState_NilConnectClient(t *testing.T) {
|
||||
s := newTestServer()
|
||||
s.connectClient = nil
|
||||
s.profileManager = nil // will cause error if it tries to proceed past the nil check
|
||||
|
||||
// Should not panic — the nil check should prevent calling Status() on nil
|
||||
assert.NotPanics(t, func() {
|
||||
_, _ = s.CleanState(context.Background(), &proto.CleanStateRequest{All: true})
|
||||
})
|
||||
}
|
||||
|
||||
// TestDeleteState_NilConnectClient validates that DeleteState doesn't panic
|
||||
// when connectClient is nil.
|
||||
func TestDeleteState_NilConnectClient(t *testing.T) {
|
||||
s := newTestServer()
|
||||
s.connectClient = nil
|
||||
s.profileManager = nil
|
||||
|
||||
assert.NotPanics(t, func() {
|
||||
_, _ = s.DeleteState(context.Background(), &proto.DeleteStateRequest{All: true})
|
||||
})
|
||||
}
|
||||
|
||||
// TestDownThenUp_StaleRunningChan documents the known state issue where
|
||||
// clientRunningChan from a previous connection is already closed, causing
|
||||
// waitForUp() to return immediately on reconnect.
|
||||
func TestDownThenUp_StaleRunningChan(t *testing.T) {
|
||||
s := newTestServer()
|
||||
|
||||
// Simulate state after a successful connection
|
||||
s.clientRunning = true
|
||||
s.clientRunningChan = make(chan struct{})
|
||||
close(s.clientRunningChan) // closed when engine started
|
||||
s.clientGiveUpChan = make(chan struct{})
|
||||
s.connectClient = newDummyConnectClient(context.Background())
|
||||
|
||||
_, cancel := context.WithCancel(context.Background())
|
||||
s.actCancel = cancel
|
||||
|
||||
// Simulate Down(): cleanupConnection sets connectClient = nil
|
||||
s.mutex.Lock()
|
||||
err := s.cleanupConnection()
|
||||
s.mutex.Unlock()
|
||||
require.NoError(t, err)
|
||||
|
||||
// After cleanup: connectClient is nil, clientRunning still true
|
||||
// (goroutine hasn't exited yet)
|
||||
s.mutex.Lock()
|
||||
assert.Nil(t, s.connectClient, "connectClient should be nil after cleanup")
|
||||
assert.True(t, s.clientRunning, "clientRunning still true until goroutine exits")
|
||||
s.mutex.Unlock()
|
||||
|
||||
// waitForUp() returns immediately due to stale closed clientRunningChan
|
||||
ctx, ctxCancel := context.WithTimeout(context.Background(), 2*time.Second)
|
||||
defer ctxCancel()
|
||||
|
||||
waitDone := make(chan error, 1)
|
||||
go func() {
|
||||
_, err := s.waitForUp(ctx)
|
||||
waitDone <- err
|
||||
}()
|
||||
|
||||
select {
|
||||
case err := <-waitDone:
|
||||
assert.NoError(t, err, "waitForUp returns success on stale channel")
|
||||
// But connectClient is still nil — this is the stale state issue
|
||||
s.mutex.Lock()
|
||||
assert.Nil(t, s.connectClient, "connectClient is nil despite waitForUp success")
|
||||
s.mutex.Unlock()
|
||||
case <-time.After(1 * time.Second):
|
||||
t.Fatal("waitForUp should have returned immediately due to stale closed channel")
|
||||
}
|
||||
}
|
||||
|
||||
// TestConnectClient_EngineNilOnFreshClient validates that a newly created
|
||||
// ConnectClient has nil Engine (before Run is called).
|
||||
func TestConnectClient_EngineNilOnFreshClient(t *testing.T) {
|
||||
client := newDummyConnectClient(context.Background())
|
||||
assert.Nil(t, client.Engine(), "engine should be nil on fresh ConnectClient")
|
||||
}
|
||||
@@ -39,7 +39,7 @@ func (s *Server) ListStates(_ context.Context, _ *proto.ListStatesRequest) (*pro
|
||||
|
||||
// CleanState handles cleaning of states (performing cleanup operations)
|
||||
func (s *Server) CleanState(ctx context.Context, req *proto.CleanStateRequest) (*proto.CleanStateResponse, error) {
|
||||
if s.connectClient.Status() == internal.StatusConnected || s.connectClient.Status() == internal.StatusConnecting {
|
||||
if s.connectClient != nil && (s.connectClient.Status() == internal.StatusConnected || s.connectClient.Status() == internal.StatusConnecting) {
|
||||
return nil, status.Errorf(codes.FailedPrecondition, "cannot clean state while connecting or connected, run 'netbird down' first.")
|
||||
}
|
||||
|
||||
@@ -82,7 +82,7 @@ func (s *Server) CleanState(ctx context.Context, req *proto.CleanStateRequest) (
|
||||
|
||||
// DeleteState handles deletion of states without cleanup
|
||||
func (s *Server) DeleteState(ctx context.Context, req *proto.DeleteStateRequest) (*proto.DeleteStateResponse, error) {
|
||||
if s.connectClient.Status() == internal.StatusConnected || s.connectClient.Status() == internal.StatusConnecting {
|
||||
if s.connectClient != nil && (s.connectClient.Status() == internal.StatusConnected || s.connectClient.Status() == internal.StatusConnecting) {
|
||||
return nil, status.Errorf(codes.FailedPrecondition, "cannot clean state while connecting or connected, run 'netbird down' first.")
|
||||
}
|
||||
|
||||
|
||||
@@ -19,6 +19,7 @@ import (
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
|
||||
"github.com/netbirdio/netbird/client/internal/daemonaddr"
|
||||
"github.com/netbirdio/netbird/client/internal/profilemanager"
|
||||
"github.com/netbirdio/netbird/client/proto"
|
||||
nbssh "github.com/netbirdio/netbird/client/ssh"
|
||||
@@ -268,7 +269,7 @@ func getDefaultDaemonAddr() string {
|
||||
if runtime.GOOS == "windows" {
|
||||
return DefaultDaemonAddrWindows
|
||||
}
|
||||
return DefaultDaemonAddr
|
||||
return daemonaddr.ResolveUnixDaemonAddr(DefaultDaemonAddr)
|
||||
}
|
||||
|
||||
// DialOptions contains options for SSH connections
|
||||
|
||||
@@ -46,8 +46,10 @@ const (
|
||||
cmdSFTP = "<sftp>"
|
||||
cmdNonInteractive = "<idle>"
|
||||
|
||||
// DefaultJWTMaxTokenAge is the default maximum age for JWT tokens accepted by the SSH server
|
||||
DefaultJWTMaxTokenAge = 5 * 60
|
||||
// DefaultJWTMaxTokenAge is the default maximum age for JWT tokens accepted by the SSH server.
|
||||
// Set to 10 minutes to accommodate identity providers like Azure Entra ID
|
||||
// that backdate the iat claim by up to 5 minutes.
|
||||
DefaultJWTMaxTokenAge = 10 * 60
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -323,7 +323,7 @@ type serviceClient struct {
|
||||
|
||||
exitNodeMu sync.Mutex
|
||||
mExitNodeItems []menuHandler
|
||||
exitNodeStates []exitNodeState
|
||||
exitNodeRetryCancel context.CancelFunc
|
||||
mExitNodeDeselectAll *systray.MenuItem
|
||||
logFile string
|
||||
wLoginURL fyne.Window
|
||||
@@ -924,7 +924,7 @@ func (s *serviceClient) updateStatus() error {
|
||||
s.mDown.Enable()
|
||||
s.mNetworks.Enable()
|
||||
s.mExitNode.Enable()
|
||||
go s.updateExitNodes()
|
||||
s.startExitNodeRefresh()
|
||||
systrayIconState = true
|
||||
case status.Status == string(internal.StatusConnecting):
|
||||
s.setConnectingStatus()
|
||||
@@ -985,6 +985,7 @@ func (s *serviceClient) setDisconnectedStatus() {
|
||||
s.mUp.Enable()
|
||||
s.mNetworks.Disable()
|
||||
s.mExitNode.Disable()
|
||||
s.cancelExitNodeRetry()
|
||||
go s.updateExitNodes()
|
||||
}
|
||||
|
||||
|
||||
@@ -100,8 +100,7 @@ func (h *eventHandler) handleConnectClick() {
|
||||
|
||||
func (h *eventHandler) handleDisconnectClick() {
|
||||
h.client.mDown.Disable()
|
||||
|
||||
h.client.exitNodeStates = []exitNodeState{}
|
||||
h.client.cancelExitNodeRetry()
|
||||
|
||||
if h.client.connectCancel != nil {
|
||||
log.Debugf("cancelling ongoing connect operation")
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -34,11 +33,6 @@ const (
|
||||
|
||||
type filter string
|
||||
|
||||
type exitNodeState struct {
|
||||
id string
|
||||
selected bool
|
||||
}
|
||||
|
||||
func (s *serviceClient) showNetworksUI() {
|
||||
s.wNetworks = s.app.NewWindow("Networks")
|
||||
s.wNetworks.SetOnClosed(s.cancel)
|
||||
@@ -335,16 +329,75 @@ func (s *serviceClient) updateNetworksBasedOnDisplayTab(tabs *container.AppTabs,
|
||||
s.updateNetworks(grid, f)
|
||||
}
|
||||
|
||||
func (s *serviceClient) updateExitNodes() {
|
||||
// startExitNodeRefresh initiates exit node menu refresh after connecting.
|
||||
// On Windows, TrayOpenedCh is not supported by the systray library, so we use
|
||||
// a background poller to keep exit nodes in sync while connected.
|
||||
// On macOS/Linux, TrayOpenedCh handles refreshes on each tray open.
|
||||
func (s *serviceClient) startExitNodeRefresh() {
|
||||
s.cancelExitNodeRetry()
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
ctx, cancel := context.WithCancel(s.ctx)
|
||||
s.exitNodeMu.Lock()
|
||||
s.exitNodeRetryCancel = cancel
|
||||
s.exitNodeMu.Unlock()
|
||||
|
||||
go s.pollExitNodes(ctx)
|
||||
} else {
|
||||
go s.updateExitNodes()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *serviceClient) cancelExitNodeRetry() {
|
||||
s.exitNodeMu.Lock()
|
||||
if s.exitNodeRetryCancel != nil {
|
||||
s.exitNodeRetryCancel()
|
||||
s.exitNodeRetryCancel = nil
|
||||
}
|
||||
s.exitNodeMu.Unlock()
|
||||
}
|
||||
|
||||
// pollExitNodes periodically refreshes exit nodes while connected.
|
||||
// Uses a short initial interval to catch routes from the management sync,
|
||||
// then switches to a longer interval for ongoing updates.
|
||||
func (s *serviceClient) pollExitNodes(ctx context.Context) {
|
||||
// Initial fast polling to catch routes as they appear after connect.
|
||||
for i := 0; i < 5; i++ {
|
||||
if s.updateExitNodes() {
|
||||
break
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-time.After(2 * time.Second):
|
||||
}
|
||||
}
|
||||
|
||||
ticker := time.NewTicker(10 * time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
s.updateExitNodes()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// updateExitNodes fetches exit nodes from the daemon and recreates the menu.
|
||||
// Returns true if exit nodes were found.
|
||||
func (s *serviceClient) updateExitNodes() bool {
|
||||
conn, err := s.getSrvClient(defaultFailTimeout)
|
||||
if err != nil {
|
||||
log.Errorf("get client: %v", err)
|
||||
return
|
||||
return false
|
||||
}
|
||||
exitNodes, err := s.getExitNodes(conn)
|
||||
if err != nil {
|
||||
log.Errorf("get exit nodes: %v", err)
|
||||
return
|
||||
return false
|
||||
}
|
||||
|
||||
s.exitNodeMu.Lock()
|
||||
@@ -354,28 +407,14 @@ func (s *serviceClient) updateExitNodes() {
|
||||
|
||||
if len(s.mExitNodeItems) > 0 {
|
||||
s.mExitNode.Enable()
|
||||
} else {
|
||||
s.mExitNode.Disable()
|
||||
return true
|
||||
}
|
||||
|
||||
s.mExitNode.Disable()
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *serviceClient) recreateExitNodeMenu(exitNodes []*proto.Network) {
|
||||
var exitNodeIDs []exitNodeState
|
||||
for _, node := range exitNodes {
|
||||
exitNodeIDs = append(exitNodeIDs, exitNodeState{
|
||||
id: node.ID,
|
||||
selected: node.Selected,
|
||||
})
|
||||
}
|
||||
|
||||
sort.Slice(exitNodeIDs, func(i, j int) bool {
|
||||
return exitNodeIDs[i].id < exitNodeIDs[j].id
|
||||
})
|
||||
if slices.Equal(s.exitNodeStates, exitNodeIDs) {
|
||||
log.Debug("Exit node menu already up to date")
|
||||
return
|
||||
}
|
||||
|
||||
for _, node := range s.mExitNodeItems {
|
||||
node.cancel()
|
||||
node.Hide()
|
||||
@@ -413,8 +452,6 @@ func (s *serviceClient) recreateExitNodeMenu(exitNodes []*proto.Network) {
|
||||
go s.handleChecked(ctx, node.ID, menuItem)
|
||||
}
|
||||
|
||||
s.exitNodeStates = exitNodeIDs
|
||||
|
||||
if showDeselectAll {
|
||||
s.mExitNode.AddSeparator()
|
||||
deselectAllItem := s.mExitNode.AddSubMenuItem("Deselect All", "Deselect All")
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"net/netip"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -71,6 +72,7 @@ type ServerConfig struct {
|
||||
Auth AuthConfig `yaml:"auth"`
|
||||
Store StoreConfig `yaml:"store"`
|
||||
ActivityStore StoreConfig `yaml:"activityStore"`
|
||||
AuthStore StoreConfig `yaml:"authStore"`
|
||||
ReverseProxy ReverseProxyConfig `yaml:"reverseProxy"`
|
||||
}
|
||||
|
||||
@@ -171,7 +173,8 @@ type RelaysConfig struct {
|
||||
type StoreConfig struct {
|
||||
Engine string `yaml:"engine"`
|
||||
EncryptionKey string `yaml:"encryptionKey"`
|
||||
DSN string `yaml:"dsn"` // Connection string for postgres or mysql engines
|
||||
DSN string `yaml:"dsn"` // Connection string for postgres or mysql engines
|
||||
File string `yaml:"file"` // SQLite database file path (optional, defaults to dataDir)
|
||||
}
|
||||
|
||||
// ReverseProxyConfig contains reverse proxy settings
|
||||
@@ -533,6 +536,74 @@ func stripSignalProtocol(uri string) string {
|
||||
return uri
|
||||
}
|
||||
|
||||
func buildRelayConfig(relays RelaysConfig) (*nbconfig.Relay, error) {
|
||||
var ttl time.Duration
|
||||
if relays.CredentialsTTL != "" {
|
||||
var err error
|
||||
ttl, err = time.ParseDuration(relays.CredentialsTTL)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid relay credentials TTL %q: %w", relays.CredentialsTTL, err)
|
||||
}
|
||||
}
|
||||
return &nbconfig.Relay{
|
||||
Addresses: relays.Addresses,
|
||||
CredentialsTTL: util.Duration{Duration: ttl},
|
||||
Secret: relays.Secret,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// buildEmbeddedIdPConfig builds the embedded IdP configuration.
|
||||
// authStore overrides auth.storage when set.
|
||||
func (c *CombinedConfig) buildEmbeddedIdPConfig(mgmt ManagementConfig) (*idp.EmbeddedIdPConfig, error) {
|
||||
authStorageType := mgmt.Auth.Storage.Type
|
||||
authStorageDSN := c.Server.AuthStore.DSN
|
||||
if c.Server.AuthStore.Engine != "" {
|
||||
authStorageType = c.Server.AuthStore.Engine
|
||||
}
|
||||
if authStorageType == "" {
|
||||
authStorageType = "sqlite3"
|
||||
}
|
||||
authStorageFile := ""
|
||||
if authStorageType == "postgres" {
|
||||
if authStorageDSN == "" {
|
||||
return nil, fmt.Errorf("authStore.dsn is required when authStore.engine is postgres")
|
||||
}
|
||||
} else {
|
||||
authStorageFile = path.Join(mgmt.DataDir, "idp.db")
|
||||
if c.Server.AuthStore.File != "" {
|
||||
authStorageFile = c.Server.AuthStore.File
|
||||
if !filepath.IsAbs(authStorageFile) {
|
||||
authStorageFile = filepath.Join(mgmt.DataDir, authStorageFile)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
cfg := &idp.EmbeddedIdPConfig{
|
||||
Enabled: true,
|
||||
Issuer: mgmt.Auth.Issuer,
|
||||
LocalAuthDisabled: mgmt.Auth.LocalAuthDisabled,
|
||||
SignKeyRefreshEnabled: mgmt.Auth.SignKeyRefreshEnabled,
|
||||
Storage: idp.EmbeddedStorageConfig{
|
||||
Type: authStorageType,
|
||||
Config: idp.EmbeddedStorageTypeConfig{
|
||||
File: authStorageFile,
|
||||
DSN: authStorageDSN,
|
||||
},
|
||||
},
|
||||
DashboardRedirectURIs: mgmt.Auth.DashboardRedirectURIs,
|
||||
CLIRedirectURIs: mgmt.Auth.CLIRedirectURIs,
|
||||
}
|
||||
|
||||
if mgmt.Auth.Owner != nil && mgmt.Auth.Owner.Email != "" {
|
||||
cfg.Owner = &idp.OwnerConfig{
|
||||
Email: mgmt.Auth.Owner.Email,
|
||||
Hash: mgmt.Auth.Owner.Password,
|
||||
}
|
||||
}
|
||||
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
// ToManagementConfig converts CombinedConfig to management server config
|
||||
func (c *CombinedConfig) ToManagementConfig() (*nbconfig.Config, error) {
|
||||
mgmt := c.Management
|
||||
@@ -551,19 +622,11 @@ func (c *CombinedConfig) ToManagementConfig() (*nbconfig.Config, error) {
|
||||
// Build relay config
|
||||
var relayConfig *nbconfig.Relay
|
||||
if len(mgmt.Relays.Addresses) > 0 || mgmt.Relays.Secret != "" {
|
||||
var ttl time.Duration
|
||||
if mgmt.Relays.CredentialsTTL != "" {
|
||||
var err error
|
||||
ttl, err = time.ParseDuration(mgmt.Relays.CredentialsTTL)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid relay credentials TTL %q: %w", mgmt.Relays.CredentialsTTL, err)
|
||||
}
|
||||
}
|
||||
relayConfig = &nbconfig.Relay{
|
||||
Addresses: mgmt.Relays.Addresses,
|
||||
CredentialsTTL: util.Duration{Duration: ttl},
|
||||
Secret: mgmt.Relays.Secret,
|
||||
relay, err := buildRelayConfig(mgmt.Relays)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
relayConfig = relay
|
||||
}
|
||||
|
||||
// Build signal config
|
||||
@@ -599,31 +662,9 @@ func (c *CombinedConfig) ToManagementConfig() (*nbconfig.Config, error) {
|
||||
httpConfig := &nbconfig.HttpServerConfig{}
|
||||
|
||||
// Build embedded IDP config (always enabled in combined server)
|
||||
storageFile := mgmt.Auth.Storage.File
|
||||
if storageFile == "" {
|
||||
storageFile = path.Join(mgmt.DataDir, "idp.db")
|
||||
}
|
||||
|
||||
embeddedIdP := &idp.EmbeddedIdPConfig{
|
||||
Enabled: true,
|
||||
Issuer: mgmt.Auth.Issuer,
|
||||
LocalAuthDisabled: mgmt.Auth.LocalAuthDisabled,
|
||||
SignKeyRefreshEnabled: mgmt.Auth.SignKeyRefreshEnabled,
|
||||
Storage: idp.EmbeddedStorageConfig{
|
||||
Type: mgmt.Auth.Storage.Type,
|
||||
Config: idp.EmbeddedStorageTypeConfig{
|
||||
File: storageFile,
|
||||
},
|
||||
},
|
||||
DashboardRedirectURIs: mgmt.Auth.DashboardRedirectURIs,
|
||||
CLIRedirectURIs: mgmt.Auth.CLIRedirectURIs,
|
||||
}
|
||||
|
||||
if mgmt.Auth.Owner != nil && mgmt.Auth.Owner.Email != "" {
|
||||
embeddedIdP.Owner = &idp.OwnerConfig{
|
||||
Email: mgmt.Auth.Owner.Email,
|
||||
Hash: mgmt.Auth.Owner.Password, // Will be hashed if plain text
|
||||
}
|
||||
embeddedIdP, err := c.buildEmbeddedIdPConfig(mgmt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Set HTTP config fields for embedded IDP
|
||||
|
||||
@@ -140,6 +140,9 @@ func initializeConfig() error {
|
||||
os.Setenv("NB_STORE_ENGINE_MYSQL_DSN", dsn)
|
||||
}
|
||||
}
|
||||
if file := config.Server.Store.File; file != "" {
|
||||
os.Setenv("NB_STORE_ENGINE_SQLITE_FILE", file)
|
||||
}
|
||||
|
||||
if engine := config.Server.ActivityStore.Engine; engine != "" {
|
||||
engineLower := strings.ToLower(engine)
|
||||
@@ -151,6 +154,9 @@ func initializeConfig() error {
|
||||
os.Setenv("NB_ACTIVITY_EVENT_POSTGRES_DSN", dsn)
|
||||
}
|
||||
}
|
||||
if file := config.Server.ActivityStore.File; file != "" {
|
||||
os.Setenv("NB_ACTIVITY_EVENT_SQLITE_FILE", file)
|
||||
}
|
||||
|
||||
log.Infof("Starting combined NetBird server")
|
||||
logConfig(config)
|
||||
@@ -487,9 +493,6 @@ func handleTLSConfig(cfg *CombinedConfig) (*tls.Config, bool, error) {
|
||||
func createManagementServer(cfg *CombinedConfig, mgmtConfig *nbconfig.Config) (*mgmtServer.BaseServer, error) {
|
||||
mgmt := cfg.Management
|
||||
|
||||
dnsDomain := mgmt.DnsDomain
|
||||
singleAccModeDomain := dnsDomain
|
||||
|
||||
// Extract port from listen address
|
||||
_, portStr, err := net.SplitHostPort(cfg.Server.ListenAddress)
|
||||
if err != nil {
|
||||
@@ -501,8 +504,9 @@ func createManagementServer(cfg *CombinedConfig, mgmtConfig *nbconfig.Config) (*
|
||||
mgmtSrv := mgmtServer.NewServer(
|
||||
&mgmtServer.Config{
|
||||
NbConfig: mgmtConfig,
|
||||
DNSDomain: dnsDomain,
|
||||
MgmtSingleAccModeDomain: singleAccModeDomain,
|
||||
DNSDomain: "",
|
||||
MgmtSingleAccModeDomain: "",
|
||||
AutoResolveDomains: true,
|
||||
MgmtPort: mgmtPort,
|
||||
MgmtMetricsPort: cfg.Server.MetricsPort,
|
||||
DisableMetrics: mgmt.DisableAnonymousMetrics,
|
||||
|
||||
@@ -42,6 +42,9 @@ func withTokenStore(cmd *cobra.Command, fn func(ctx context.Context, s store.Sto
|
||||
os.Setenv("NB_STORE_ENGINE_MYSQL_DSN", dsn)
|
||||
}
|
||||
}
|
||||
if file := cfg.Server.Store.File; file != "" {
|
||||
os.Setenv("NB_STORE_ENGINE_SQLITE_FILE", file)
|
||||
}
|
||||
|
||||
datadir := cfg.Management.DataDir
|
||||
engine := types.Engine(cfg.Management.Store.Engine)
|
||||
|
||||
@@ -103,11 +103,19 @@ server:
|
||||
engine: "sqlite" # sqlite, postgres, or mysql
|
||||
dsn: "" # Connection string for postgres or mysql
|
||||
encryptionKey: ""
|
||||
# file: "" # Custom SQLite file path (optional, defaults to {dataDir}/store.db)
|
||||
|
||||
# Activity events store configuration (optional, defaults to sqlite in dataDir)
|
||||
# activityStore:
|
||||
# engine: "sqlite" # sqlite or postgres
|
||||
# dsn: "" # Connection string for postgres
|
||||
# file: "" # Custom SQLite file path (optional, defaults to {dataDir}/events.db)
|
||||
|
||||
# Auth (embedded IdP) store configuration (optional, defaults to sqlite3 in dataDir/idp.db)
|
||||
# authStore:
|
||||
# engine: "sqlite3" # sqlite3 or postgres
|
||||
# dsn: "" # Connection string for postgres (e.g., "host=localhost port=5432 user=postgres password=postgres dbname=netbird_idp sslmode=disable")
|
||||
# file: "" # Custom SQLite file path (optional, defaults to {dataDir}/idp.db)
|
||||
|
||||
# Reverse proxy settings (optional)
|
||||
# reverseProxy:
|
||||
|
||||
@@ -5,7 +5,10 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
@@ -195,11 +198,175 @@ func (s *Storage) OpenStorage(logger *slog.Logger) (storage.Storage, error) {
|
||||
return nil, fmt.Errorf("sqlite3 storage requires 'file' config")
|
||||
}
|
||||
return (&sql.SQLite3{File: file}).Open(logger)
|
||||
case "postgres":
|
||||
dsn, _ := s.Config["dsn"].(string)
|
||||
if dsn == "" {
|
||||
return nil, fmt.Errorf("postgres storage requires 'dsn' config")
|
||||
}
|
||||
pg, err := parsePostgresDSN(dsn)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid postgres DSN: %w", err)
|
||||
}
|
||||
return pg.Open(logger)
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported storage type: %s", s.Type)
|
||||
}
|
||||
}
|
||||
|
||||
// parsePostgresDSN parses a DSN into a sql.Postgres config.
|
||||
// It accepts both URI format (postgres://user:pass@host:port/dbname?sslmode=disable)
|
||||
// and libpq key=value format (host=localhost port=5432 dbname=mydb), including quoted values.
|
||||
func parsePostgresDSN(dsn string) (*sql.Postgres, error) {
|
||||
var params map[string]string
|
||||
var err error
|
||||
|
||||
if strings.HasPrefix(dsn, "postgres://") || strings.HasPrefix(dsn, "postgresql://") {
|
||||
params, err = parsePostgresURI(dsn)
|
||||
} else {
|
||||
params, err = parsePostgresKeyValue(dsn)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
host := params["host"]
|
||||
if host == "" {
|
||||
host = "localhost"
|
||||
}
|
||||
|
||||
var port uint16 = 5432
|
||||
if p, ok := params["port"]; ok && p != "" {
|
||||
v, err := strconv.ParseUint(p, 10, 16)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid port %q: %w", p, err)
|
||||
}
|
||||
if v == 0 {
|
||||
return nil, fmt.Errorf("invalid port %q: must be non-zero", p)
|
||||
}
|
||||
port = uint16(v)
|
||||
}
|
||||
|
||||
dbname := params["dbname"]
|
||||
if dbname == "" {
|
||||
return nil, fmt.Errorf("dbname is required in DSN")
|
||||
}
|
||||
|
||||
pg := &sql.Postgres{
|
||||
NetworkDB: sql.NetworkDB{
|
||||
Host: host,
|
||||
Port: port,
|
||||
Database: dbname,
|
||||
User: params["user"],
|
||||
Password: params["password"],
|
||||
},
|
||||
}
|
||||
|
||||
if sslMode := params["sslmode"]; sslMode != "" {
|
||||
switch sslMode {
|
||||
case "disable", "allow", "prefer", "require", "verify-ca", "verify-full":
|
||||
pg.SSL.Mode = sslMode
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported sslmode %q: valid values are disable, allow, prefer, require, verify-ca, verify-full", sslMode)
|
||||
}
|
||||
}
|
||||
|
||||
return pg, nil
|
||||
}
|
||||
|
||||
// parsePostgresURI parses a postgres:// or postgresql:// URI into parameter key-value pairs.
|
||||
func parsePostgresURI(dsn string) (map[string]string, error) {
|
||||
u, err := url.Parse(dsn)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid postgres URI: %w", err)
|
||||
}
|
||||
|
||||
params := make(map[string]string)
|
||||
|
||||
if u.User != nil {
|
||||
params["user"] = u.User.Username()
|
||||
if p, ok := u.User.Password(); ok {
|
||||
params["password"] = p
|
||||
}
|
||||
}
|
||||
if u.Hostname() != "" {
|
||||
params["host"] = u.Hostname()
|
||||
}
|
||||
if u.Port() != "" {
|
||||
params["port"] = u.Port()
|
||||
}
|
||||
|
||||
dbname := strings.TrimPrefix(u.Path, "/")
|
||||
if dbname != "" {
|
||||
params["dbname"] = dbname
|
||||
}
|
||||
|
||||
for k, v := range u.Query() {
|
||||
if len(v) > 0 {
|
||||
params[k] = v[0]
|
||||
}
|
||||
}
|
||||
|
||||
return params, nil
|
||||
}
|
||||
|
||||
// parsePostgresKeyValue parses a libpq key=value DSN string, handling single-quoted values
|
||||
// (e.g., password='my pass' host=localhost).
|
||||
func parsePostgresKeyValue(dsn string) (map[string]string, error) {
|
||||
params := make(map[string]string)
|
||||
s := strings.TrimSpace(dsn)
|
||||
|
||||
for s != "" {
|
||||
eqIdx := strings.IndexByte(s, '=')
|
||||
if eqIdx < 0 {
|
||||
break
|
||||
}
|
||||
key := strings.TrimSpace(s[:eqIdx])
|
||||
|
||||
value, rest, err := parseDSNValue(s[eqIdx+1:])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%w for key %q", err, key)
|
||||
}
|
||||
|
||||
params[key] = value
|
||||
s = strings.TrimSpace(rest)
|
||||
}
|
||||
|
||||
return params, nil
|
||||
}
|
||||
|
||||
// parseDSNValue parses the next value from a libpq key=value string positioned after the '='.
|
||||
// It returns the parsed value and the remaining unparsed string.
|
||||
func parseDSNValue(s string) (value, rest string, err error) {
|
||||
if len(s) > 0 && s[0] == '\'' {
|
||||
return parseQuotedDSNValue(s[1:])
|
||||
}
|
||||
// Unquoted value: read until whitespace.
|
||||
idx := strings.IndexAny(s, " \t\n")
|
||||
if idx < 0 {
|
||||
return s, "", nil
|
||||
}
|
||||
return s[:idx], s[idx:], nil
|
||||
}
|
||||
|
||||
// parseQuotedDSNValue parses a single-quoted value starting after the opening quote.
|
||||
// Libpq uses ” to represent a literal single quote inside quoted values.
|
||||
func parseQuotedDSNValue(s string) (value, rest string, err error) {
|
||||
var buf strings.Builder
|
||||
for len(s) > 0 {
|
||||
if s[0] == '\'' {
|
||||
if len(s) > 1 && s[1] == '\'' {
|
||||
buf.WriteByte('\'')
|
||||
s = s[2:]
|
||||
continue
|
||||
}
|
||||
return buf.String(), s[1:], nil
|
||||
}
|
||||
buf.WriteByte(s[0])
|
||||
s = s[1:]
|
||||
}
|
||||
return "", "", fmt.Errorf("unterminated quoted value")
|
||||
}
|
||||
|
||||
// Validate validates the configuration
|
||||
func (c *YAMLConfig) Validate() error {
|
||||
if c.Issuer == "" {
|
||||
|
||||
@@ -24,6 +24,8 @@ type AccessLogEntry struct {
|
||||
Reason string
|
||||
UserId string `gorm:"index"`
|
||||
AuthMethodUsed string `gorm:"index"`
|
||||
BytesUpload int64 `gorm:"index"`
|
||||
BytesDownload int64 `gorm:"index"`
|
||||
}
|
||||
|
||||
// FromProto creates an AccessLogEntry from a proto.AccessLog
|
||||
@@ -39,6 +41,8 @@ func (a *AccessLogEntry) FromProto(serviceLog *proto.AccessLog) {
|
||||
a.UserId = serviceLog.GetUserId()
|
||||
a.AuthMethodUsed = serviceLog.GetAuthMechanism()
|
||||
a.AccountID = serviceLog.GetAccountId()
|
||||
a.BytesUpload = serviceLog.GetBytesUpload()
|
||||
a.BytesDownload = serviceLog.GetBytesDownload()
|
||||
|
||||
if sourceIP := serviceLog.GetSourceIp(); sourceIP != "" {
|
||||
if ip, err := netip.ParseAddr(sourceIP); err == nil {
|
||||
@@ -101,5 +105,7 @@ func (a *AccessLogEntry) ToAPIResponse() *api.ProxyAccessLog {
|
||||
AuthMethodUsed: authMethod,
|
||||
CountryCode: countryCode,
|
||||
CityName: cityName,
|
||||
BytesUpload: a.BytesUpload,
|
||||
BytesDownload: a.BytesDownload,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -15,3 +15,12 @@ type Domain struct {
|
||||
Type Type `gorm:"-"`
|
||||
Validated bool
|
||||
}
|
||||
|
||||
// EventMeta returns activity event metadata for a domain
|
||||
func (d *Domain) EventMeta() map[string]any {
|
||||
return map[string]any{
|
||||
"domain": d.Domain,
|
||||
"target_cluster": d.TargetCluster,
|
||||
"validated": d.Validated,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,6 +9,8 @@ import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain"
|
||||
"github.com/netbirdio/netbird/management/server/account"
|
||||
"github.com/netbirdio/netbird/management/server/activity"
|
||||
"github.com/netbirdio/netbird/management/server/permissions"
|
||||
"github.com/netbirdio/netbird/management/server/permissions/modules"
|
||||
"github.com/netbirdio/netbird/management/server/permissions/operations"
|
||||
@@ -27,25 +29,25 @@ type store interface {
|
||||
DeleteCustomDomain(ctx context.Context, accountID string, domainID string) error
|
||||
}
|
||||
|
||||
type proxyURLProvider interface {
|
||||
GetConnectedProxyURLs() []string
|
||||
type proxyManager interface {
|
||||
GetActiveClusterAddresses(ctx context.Context) ([]string, error)
|
||||
}
|
||||
|
||||
type Manager struct {
|
||||
store store
|
||||
validator domain.Validator
|
||||
proxyURLProvider proxyURLProvider
|
||||
proxyManager proxyManager
|
||||
permissionsManager permissions.Manager
|
||||
accountManager account.Manager
|
||||
}
|
||||
|
||||
func NewManager(store store, proxyURLProvider proxyURLProvider, permissionsManager permissions.Manager) Manager {
|
||||
func NewManager(store store, proxyMgr proxyManager, permissionsManager permissions.Manager, accountManager account.Manager) Manager {
|
||||
return Manager{
|
||||
store: store,
|
||||
proxyURLProvider: proxyURLProvider,
|
||||
validator: domain.Validator{
|
||||
Resolver: net.DefaultResolver,
|
||||
},
|
||||
store: store,
|
||||
proxyManager: proxyMgr,
|
||||
validator: domain.Validator{Resolver: net.DefaultResolver},
|
||||
permissionsManager: permissionsManager,
|
||||
accountManager: accountManager,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -67,8 +69,12 @@ func (m Manager) GetDomains(ctx context.Context, accountID, userID string) ([]*d
|
||||
|
||||
// Add connected proxy clusters as free domains.
|
||||
// The cluster address itself is the free domain base (e.g., "eu.proxy.netbird.io").
|
||||
allowList := m.proxyURLAllowList()
|
||||
log.WithFields(log.Fields{
|
||||
allowList, err := m.proxyManager.GetActiveClusterAddresses(ctx)
|
||||
if err != nil {
|
||||
log.WithContext(ctx).Errorf("failed to get active proxy cluster addresses: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
log.WithContext(ctx).WithFields(log.Fields{
|
||||
"accountID": accountID,
|
||||
"proxyAllowList": allowList,
|
||||
}).Debug("getting domains with proxy allow list")
|
||||
@@ -107,7 +113,10 @@ func (m Manager) CreateDomain(ctx context.Context, accountID, userID, domainName
|
||||
}
|
||||
|
||||
// Verify the target cluster is in the available clusters
|
||||
allowList := m.proxyURLAllowList()
|
||||
allowList, err := m.proxyManager.GetActiveClusterAddresses(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get active proxy cluster addresses: %w", err)
|
||||
}
|
||||
clusterValid := false
|
||||
for _, cluster := range allowList {
|
||||
if cluster == targetCluster {
|
||||
@@ -129,6 +138,9 @@ func (m Manager) CreateDomain(ctx context.Context, accountID, userID, domainName
|
||||
if err != nil {
|
||||
return d, fmt.Errorf("create domain in store: %w", err)
|
||||
}
|
||||
|
||||
m.accountManager.StoreEvent(ctx, userID, d.ID, accountID, activity.DomainAdded, d.EventMeta())
|
||||
|
||||
return d, nil
|
||||
}
|
||||
|
||||
@@ -141,10 +153,18 @@ func (m Manager) DeleteDomain(ctx context.Context, accountID, userID, domainID s
|
||||
return status.NewPermissionDeniedError()
|
||||
}
|
||||
|
||||
d, err := m.store.GetCustomDomain(ctx, accountID, domainID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get domain from store: %w", err)
|
||||
}
|
||||
|
||||
if err := m.store.DeleteCustomDomain(ctx, accountID, domainID); err != nil {
|
||||
// TODO: check for "no records" type error. Because that is a success condition.
|
||||
return fmt.Errorf("delete domain from store: %w", err)
|
||||
}
|
||||
|
||||
m.accountManager.StoreEvent(ctx, userID, domainID, accountID, activity.DomainDeleted, d.EventMeta())
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -211,6 +231,8 @@ func (m Manager) ValidateDomain(ctx context.Context, accountID, userID, domainID
|
||||
}).WithError(err).Error("update custom domain in store")
|
||||
return
|
||||
}
|
||||
|
||||
m.accountManager.StoreEvent(context.Background(), userID, domainID, accountID, activity.DomainValidated, d.EventMeta())
|
||||
} else {
|
||||
log.WithFields(log.Fields{
|
||||
"accountID": accountID,
|
||||
@@ -221,25 +243,26 @@ func (m Manager) ValidateDomain(ctx context.Context, accountID, userID, domainID
|
||||
}
|
||||
}
|
||||
|
||||
// GetClusterDomains returns a list of proxy cluster domains.
|
||||
func (m Manager) GetClusterDomains() []string {
|
||||
return m.proxyURLAllowList()
|
||||
}
|
||||
|
||||
// proxyURLAllowList retrieves a list of currently connected proxies and
|
||||
// their URLs
|
||||
func (m Manager) proxyURLAllowList() []string {
|
||||
var reverseProxyAddresses []string
|
||||
if m.proxyURLProvider != nil {
|
||||
reverseProxyAddresses = m.proxyURLProvider.GetConnectedProxyURLs()
|
||||
if m.proxyManager == nil {
|
||||
return nil
|
||||
}
|
||||
return reverseProxyAddresses
|
||||
addresses, err := m.proxyManager.GetActiveClusterAddresses(context.Background())
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return addresses
|
||||
}
|
||||
|
||||
// DeriveClusterFromDomain determines the proxy cluster for a given domain.
|
||||
// For free domains (those ending with a known cluster suffix), the cluster is extracted from the domain.
|
||||
// For custom domains, the cluster is determined by checking the registered custom domain's target cluster.
|
||||
func (m Manager) DeriveClusterFromDomain(ctx context.Context, accountID, domain string) (string, error) {
|
||||
allowList := m.proxyURLAllowList()
|
||||
allowList, err := m.proxyManager.GetActiveClusterAddresses(ctx)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get active proxy cluster addresses: %w", err)
|
||||
}
|
||||
if len(allowList) == 0 {
|
||||
return "", fmt.Errorf("no proxy clusters available")
|
||||
}
|
||||
|
||||
@@ -1,163 +0,0 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/netbirdio/netbird/shared/management/status"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
exposeTTL = 90 * time.Second
|
||||
exposeReapInterval = 30 * time.Second
|
||||
maxExposesPerPeer = 10
|
||||
)
|
||||
|
||||
type trackedExpose struct {
|
||||
mu sync.Mutex
|
||||
domain string
|
||||
accountID string
|
||||
peerID string
|
||||
lastRenewed time.Time
|
||||
expiring bool
|
||||
}
|
||||
|
||||
type exposeTracker struct {
|
||||
activeExposes sync.Map
|
||||
exposeCreateMu sync.Mutex
|
||||
manager *managerImpl
|
||||
}
|
||||
|
||||
func exposeKey(peerID, domain string) string {
|
||||
return peerID + ":" + domain
|
||||
}
|
||||
|
||||
// TrackExposeIfAllowed atomically checks the per-peer limit and registers a new
|
||||
// active expose session under the same lock. Returns (true, false) if the expose
|
||||
// was already tracked (duplicate), (false, true) if tracking succeeded, and
|
||||
// (false, false) if the peer has reached the limit.
|
||||
func (t *exposeTracker) TrackExposeIfAllowed(peerID, domain, accountID string) (alreadyTracked, ok bool) {
|
||||
t.exposeCreateMu.Lock()
|
||||
defer t.exposeCreateMu.Unlock()
|
||||
|
||||
key := exposeKey(peerID, domain)
|
||||
_, loaded := t.activeExposes.LoadOrStore(key, &trackedExpose{
|
||||
domain: domain,
|
||||
accountID: accountID,
|
||||
peerID: peerID,
|
||||
lastRenewed: time.Now(),
|
||||
})
|
||||
if loaded {
|
||||
return true, false
|
||||
}
|
||||
|
||||
if t.CountPeerExposes(peerID) > maxExposesPerPeer {
|
||||
t.activeExposes.Delete(key)
|
||||
return false, false
|
||||
}
|
||||
|
||||
return false, true
|
||||
}
|
||||
|
||||
// UntrackExpose removes an active expose session from tracking.
|
||||
func (t *exposeTracker) UntrackExpose(peerID, domain string) {
|
||||
t.activeExposes.Delete(exposeKey(peerID, domain))
|
||||
}
|
||||
|
||||
// CountPeerExposes returns the number of active expose sessions for a peer.
|
||||
func (t *exposeTracker) CountPeerExposes(peerID string) int {
|
||||
count := 0
|
||||
t.activeExposes.Range(func(_, val any) bool {
|
||||
if expose := val.(*trackedExpose); expose.peerID == peerID {
|
||||
count++
|
||||
}
|
||||
return true
|
||||
})
|
||||
return count
|
||||
}
|
||||
|
||||
// MaxExposesPerPeer returns the maximum number of concurrent exposes allowed per peer.
|
||||
func (t *exposeTracker) MaxExposesPerPeer() int {
|
||||
return maxExposesPerPeer
|
||||
}
|
||||
|
||||
// RenewTrackedExpose updates the in-memory lastRenewed timestamp for a tracked expose.
|
||||
// Returns false if the expose is not tracked or is being reaped.
|
||||
func (t *exposeTracker) RenewTrackedExpose(peerID, domain string) bool {
|
||||
key := exposeKey(peerID, domain)
|
||||
val, ok := t.activeExposes.Load(key)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
expose := val.(*trackedExpose)
|
||||
expose.mu.Lock()
|
||||
if expose.expiring {
|
||||
expose.mu.Unlock()
|
||||
return false
|
||||
}
|
||||
expose.lastRenewed = time.Now()
|
||||
expose.mu.Unlock()
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// StopTrackedExpose removes an active expose session from tracking.
|
||||
// Returns false if the expose was not tracked.
|
||||
func (t *exposeTracker) StopTrackedExpose(peerID, domain string) bool {
|
||||
key := exposeKey(peerID, domain)
|
||||
_, ok := t.activeExposes.LoadAndDelete(key)
|
||||
return ok
|
||||
}
|
||||
|
||||
// StartExposeReaper starts a background goroutine that reaps expired expose sessions.
|
||||
func (t *exposeTracker) StartExposeReaper(ctx context.Context) {
|
||||
go func() {
|
||||
ticker := time.NewTicker(exposeReapInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
t.reapExpiredExposes()
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (t *exposeTracker) reapExpiredExposes() {
|
||||
t.activeExposes.Range(func(key, val any) bool {
|
||||
expose := val.(*trackedExpose)
|
||||
expose.mu.Lock()
|
||||
expired := time.Since(expose.lastRenewed) > exposeTTL
|
||||
if expired {
|
||||
expose.expiring = true
|
||||
}
|
||||
expose.mu.Unlock()
|
||||
|
||||
if !expired {
|
||||
return true
|
||||
}
|
||||
|
||||
log.Infof("reaping expired expose session for peer %s, domain %s", expose.peerID, expose.domain)
|
||||
|
||||
err := t.manager.deleteServiceFromPeer(context.Background(), expose.accountID, expose.peerID, expose.domain, true)
|
||||
|
||||
s, _ := status.FromError(err)
|
||||
|
||||
switch {
|
||||
case err == nil:
|
||||
t.activeExposes.Delete(key)
|
||||
case s.ErrorType == status.NotFound:
|
||||
log.Debugf("service %s was already deleted", expose.domain)
|
||||
default:
|
||||
log.Errorf("failed to delete expired peer-exposed service for domain %s: %v", expose.domain, err)
|
||||
}
|
||||
|
||||
return true
|
||||
})
|
||||
}
|
||||
@@ -1,256 +0,0 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy"
|
||||
)
|
||||
|
||||
// TestExposeKey verifies the composite key format and that distinct domains
// produce distinct keys.
func TestExposeKey(t *testing.T) {
	assert.Equal(t, "peer1:example.com", exposeKey("peer1", "example.com"))
	assert.Equal(t, "peer2:other.com", exposeKey("peer2", "other.com"))
	assert.NotEqual(t, exposeKey("peer1", "a.com"), exposeKey("peer1", "b.com"))
}

// TestTrackExposeIfAllowed covers first-track success, duplicate detection,
// limit enforcement, and limit isolation between peers.
func TestTrackExposeIfAllowed(t *testing.T) {
	t.Run("first track succeeds", func(t *testing.T) {
		tracker := &exposeTracker{}
		alreadyTracked, ok := tracker.TrackExposeIfAllowed("peer1", "a.com", "acct1")
		assert.False(t, alreadyTracked, "first track should not be duplicate")
		assert.True(t, ok, "first track should be allowed")
	})

	t.Run("duplicate track detected", func(t *testing.T) {
		tracker := &exposeTracker{}
		tracker.TrackExposeIfAllowed("peer1", "a.com", "acct1")

		alreadyTracked, ok := tracker.TrackExposeIfAllowed("peer1", "a.com", "acct1")
		assert.True(t, alreadyTracked, "second track should be duplicate")
		assert.False(t, ok)
	})

	t.Run("rejects when at limit", func(t *testing.T) {
		tracker := &exposeTracker{}
		// Fill the peer up to exactly maxExposesPerPeer distinct domains.
		for i := range maxExposesPerPeer {
			_, ok := tracker.TrackExposeIfAllowed("peer1", "domain-"+string(rune('a'+i))+".com", "acct1")
			assert.True(t, ok, "track %d should be allowed", i)
		}

		alreadyTracked, ok := tracker.TrackExposeIfAllowed("peer1", "over-limit.com", "acct1")
		assert.False(t, alreadyTracked)
		assert.False(t, ok, "should reject when at limit")
	})

	t.Run("other peer unaffected by limit", func(t *testing.T) {
		tracker := &exposeTracker{}
		for i := range maxExposesPerPeer {
			tracker.TrackExposeIfAllowed("peer1", "domain-"+string(rune('a'+i))+".com", "acct1")
		}

		_, ok := tracker.TrackExposeIfAllowed("peer2", "a.com", "acct1")
		assert.True(t, ok, "other peer should still be within limit")
	})
}

// TestUntrackExpose verifies that untracking drops the session from the count.
func TestUntrackExpose(t *testing.T) {
	tracker := &exposeTracker{}

	tracker.TrackExposeIfAllowed("peer1", "a.com", "acct1")
	assert.Equal(t, 1, tracker.CountPeerExposes("peer1"))

	tracker.UntrackExpose("peer1", "a.com")
	assert.Equal(t, 0, tracker.CountPeerExposes("peer1"))
}

// TestCountPeerExposes verifies per-peer counting across multiple peers.
func TestCountPeerExposes(t *testing.T) {
	tracker := &exposeTracker{}

	assert.Equal(t, 0, tracker.CountPeerExposes("peer1"))

	tracker.TrackExposeIfAllowed("peer1", "a.com", "acct1")
	tracker.TrackExposeIfAllowed("peer1", "b.com", "acct1")
	tracker.TrackExposeIfAllowed("peer2", "a.com", "acct1")

	assert.Equal(t, 2, tracker.CountPeerExposes("peer1"), "peer1 should have 2 exposes")
	assert.Equal(t, 1, tracker.CountPeerExposes("peer2"), "peer2 should have 1 expose")
	assert.Equal(t, 0, tracker.CountPeerExposes("peer3"), "peer3 should have 0 exposes")
}

// TestMaxExposesPerPeer pins the accessor to the package constant.
func TestMaxExposesPerPeer(t *testing.T) {
	tracker := &exposeTracker{}
	assert.Equal(t, maxExposesPerPeer, tracker.MaxExposesPerPeer())
}

// TestRenewTrackedExpose verifies renewal fails for untracked sessions and
// succeeds for tracked ones.
func TestRenewTrackedExpose(t *testing.T) {
	tracker := &exposeTracker{}

	found := tracker.RenewTrackedExpose("peer1", "a.com")
	assert.False(t, found, "should not find untracked expose")

	tracker.TrackExposeIfAllowed("peer1", "a.com", "acct1")

	found = tracker.RenewTrackedExpose("peer1", "a.com")
	assert.True(t, found, "should find tracked expose")
}

// TestRenewTrackedExpose_RejectsExpiring verifies the reaper's expiring flag
// wins the race against a concurrent renewal.
func TestRenewTrackedExpose_RejectsExpiring(t *testing.T) {
	tracker := &exposeTracker{}
	tracker.TrackExposeIfAllowed("peer1", "a.com", "acct1")

	// Simulate reaper marking the expose as expiring
	key := exposeKey("peer1", "a.com")
	val, _ := tracker.activeExposes.Load(key)
	expose := val.(*trackedExpose)
	expose.mu.Lock()
	expose.expiring = true
	expose.mu.Unlock()

	found := tracker.RenewTrackedExpose("peer1", "a.com")
	assert.False(t, found, "should reject renewal when expiring")
}

// TestReapExpiredExposes verifies that only expired sessions are removed by a
// reaper pass while fresh sessions survive. Uses the integration fixture so
// the reaper can actually delete the backing service.
func TestReapExpiredExposes(t *testing.T) {
	mgr, _ := setupIntegrationTest(t)
	tracker := mgr.exposeTracker

	ctx := context.Background()
	resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &reverseproxy.ExposeServiceRequest{
		Port:     8080,
		Protocol: "http",
	})
	require.NoError(t, err)

	// Manually expire the tracked entry
	key := exposeKey(testPeerID, resp.Domain)
	val, _ := tracker.activeExposes.Load(key)
	expose := val.(*trackedExpose)
	expose.mu.Lock()
	expose.lastRenewed = time.Now().Add(-2 * exposeTTL)
	expose.mu.Unlock()

	// Add an active (non-expired) tracking entry
	tracker.activeExposes.Store(exposeKey("peer1", "active.com"), &trackedExpose{
		domain:      "active.com",
		accountID:   testAccountID,
		peerID:      "peer1",
		lastRenewed: time.Now(),
	})

	tracker.reapExpiredExposes()

	_, exists := tracker.activeExposes.Load(key)
	assert.False(t, exists, "expired expose should be removed")

	_, exists = tracker.activeExposes.Load(exposeKey("peer1", "active.com"))
	assert.True(t, exists, "active expose should remain")
}

// TestReapExpiredExposes_SetsExpiringFlag verifies renewals succeed up until
// the reaper runs, and fail once the entry has been reaped.
func TestReapExpiredExposes_SetsExpiringFlag(t *testing.T) {
	mgr, _ := setupIntegrationTest(t)
	tracker := mgr.exposeTracker

	ctx := context.Background()
	resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &reverseproxy.ExposeServiceRequest{
		Port:     8080,
		Protocol: "http",
	})
	require.NoError(t, err)

	key := exposeKey(testPeerID, resp.Domain)
	val, _ := tracker.activeExposes.Load(key)
	expose := val.(*trackedExpose)

	// Expire it
	expose.mu.Lock()
	expose.lastRenewed = time.Now().Add(-2 * exposeTTL)
	expose.mu.Unlock()

	// Renew should succeed before reaping
	assert.True(t, tracker.RenewTrackedExpose(testPeerID, resp.Domain), "renew should succeed before reaper runs")

	// Re-expire and reap
	expose.mu.Lock()
	expose.lastRenewed = time.Now().Add(-2 * exposeTTL)
	expose.mu.Unlock()

	tracker.reapExpiredExposes()

	// Entry is deleted, renew returns false
	assert.False(t, tracker.RenewTrackedExpose(testPeerID, resp.Domain), "renew should fail after reap")
}

// TestConcurrentTrackAndCount races a reaper pass against a count to catch
// data races (run with -race) and confirms all expired entries are reaped.
func TestConcurrentTrackAndCount(t *testing.T) {
	mgr, _ := setupIntegrationTest(t)
	tracker := mgr.exposeTracker
	ctx := context.Background()

	for i := range 5 {
		_, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &reverseproxy.ExposeServiceRequest{
			Port:     8080 + i,
			Protocol: "http",
		})
		require.NoError(t, err)
	}

	// Manually expire all tracked entries
	tracker.activeExposes.Range(func(_, val any) bool {
		expose := val.(*trackedExpose)
		expose.mu.Lock()
		expose.lastRenewed = time.Now().Add(-2 * exposeTTL)
		expose.mu.Unlock()
		return true
	})

	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		defer wg.Done()
		tracker.reapExpiredExposes()
	}()
	go func() {
		defer wg.Done()
		tracker.CountPeerExposes(testPeerID)
	}()
	wg.Wait()

	assert.Equal(t, 0, tracker.CountPeerExposes(testPeerID), "all expired exposes should be reaped")
}

// TestTrackedExposeMutexProtectsLastRenewed hammers lastRenewed from two
// goroutines under the entry mutex; meaningful mainly under -race.
func TestTrackedExposeMutexProtectsLastRenewed(t *testing.T) {
	expose := &trackedExpose{
		lastRenewed: time.Now().Add(-1 * time.Hour),
	}

	var wg sync.WaitGroup
	wg.Add(2)

	go func() {
		defer wg.Done()
		for range 100 {
			expose.mu.Lock()
			expose.lastRenewed = time.Now()
			expose.mu.Unlock()
		}
	}()

	go func() {
		defer wg.Done()
		for range 100 {
			expose.mu.Lock()
			_ = time.Since(expose.lastRenewed)
			expose.mu.Unlock()
		}
	}()

	wg.Wait()

	expose.mu.Lock()
	require.False(t, expose.lastRenewed.IsZero(), "lastRenewed should not be zero after concurrent access")
	expose.mu.Unlock()
}
|
||||
36
management/internals/modules/reverseproxy/proxy/manager.go
Normal file
36
management/internals/modules/reverseproxy/proxy/manager.go
Normal file
@@ -0,0 +1,36 @@
|
||||
package proxy
|
||||
|
||||
//go:generate go run github.com/golang/mock/mockgen -package proxy -destination=manager_mock.go -source=./manager.go -build_flags=-mod=mod
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/netbirdio/netbird/shared/management/proto"
|
||||
)
|
||||
|
||||
// Manager defines the interface for proxy operations
type Manager interface {
	// Connect registers a proxy instance and its cluster/IP address.
	Connect(ctx context.Context, proxyID, clusterAddress, ipAddress string) error
	// Disconnect marks a proxy instance as disconnected.
	Disconnect(ctx context.Context, proxyID string) error
	// Heartbeat records liveness for a proxy instance.
	Heartbeat(ctx context.Context, proxyID string) error
	// GetActiveClusterAddresses returns the cluster addresses of all
	// currently active proxies.
	GetActiveClusterAddresses(ctx context.Context) ([]string, error)
	// CleanupStale removes proxies inactive for longer than inactivityDuration.
	CleanupStale(ctx context.Context, inactivityDuration time.Duration) error
}

// OIDCValidationConfig contains the OIDC configuration needed for token validation.
type OIDCValidationConfig struct {
	Issuer             string   // expected token issuer
	Audiences          []string // accepted token audiences
	KeysLocation       string   // JWKS endpoint / keys location
	MaxTokenAgeSeconds int64    // maximum accepted token age in seconds
}

// Controller is responsible for managing proxy clusters and routing service updates.
type Controller interface {
	// SendServiceUpdateToCluster pushes a proxy mapping update to one cluster.
	SendServiceUpdateToCluster(ctx context.Context, accountID string, update *proto.ProxyMapping, clusterAddr string)
	// GetOIDCValidationConfig exposes the OIDC settings used to validate tokens.
	GetOIDCValidationConfig() OIDCValidationConfig
	// RegisterProxyToCluster records that a proxy serves a given cluster.
	RegisterProxyToCluster(ctx context.Context, clusterAddr, proxyID string) error
	// UnregisterProxyFromCluster removes a proxy from a cluster's routing set.
	UnregisterProxyFromCluster(ctx context.Context, clusterAddr, proxyID string) error
	// GetProxiesForCluster lists the proxy IDs registered for a cluster.
	GetProxiesForCluster(clusterAddr string) []string
}
|
||||
@@ -0,0 +1,88 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy"
|
||||
nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc"
|
||||
"github.com/netbirdio/netbird/shared/management/proto"
|
||||
)
|
||||
|
||||
// GRPCController is a concrete implementation that manages proxy clusters and sends updates directly via gRPC.
type GRPCController struct {
	proxyGRPCServer *nbgrpc.ProxyServiceServer
	// Map of cluster address -> set of proxy IDs (inner value is a *sync.Map
	// used as a set: proxyID -> struct{}).
	clusterProxies sync.Map
	metrics        *metrics
}

// NewGRPCController creates a new GRPCController.
// It registers the controller's metrics on the provided meter; an error from
// metric registration is returned as-is.
func NewGRPCController(proxyGRPCServer *nbgrpc.ProxyServiceServer, meter metric.Meter) (*GRPCController, error) {
	m, err := newMetrics(meter)
	if err != nil {
		return nil, err
	}

	return &GRPCController{
		proxyGRPCServer: proxyGRPCServer,
		metrics:         m,
	}, nil
}
|
||||
|
||||
// SendServiceUpdateToCluster sends a service update to a specific proxy cluster.
// The update is forwarded to the underlying gRPC server and the per-cluster
// send counter is incremented. Note: accountID is currently unused here; the
// routing is done purely by clusterAddr.
func (c *GRPCController) SendServiceUpdateToCluster(ctx context.Context, accountID string, update *proto.ProxyMapping, clusterAddr string) {
	c.proxyGRPCServer.SendServiceUpdateToCluster(ctx, update, clusterAddr)
	c.metrics.IncrementServiceUpdateSendCount(clusterAddr)
}

// GetOIDCValidationConfig returns the OIDC validation configuration from the gRPC server.
func (c *GRPCController) GetOIDCValidationConfig() proxy.OIDCValidationConfig {
	return c.proxyGRPCServer.GetOIDCValidationConfig()
}
|
||||
|
||||
// RegisterProxyToCluster registers a proxy to a specific cluster for routing.
|
||||
func (c *GRPCController) RegisterProxyToCluster(ctx context.Context, clusterAddr, proxyID string) error {
|
||||
if clusterAddr == "" {
|
||||
return nil
|
||||
}
|
||||
proxySet, _ := c.clusterProxies.LoadOrStore(clusterAddr, &sync.Map{})
|
||||
proxySet.(*sync.Map).Store(proxyID, struct{}{})
|
||||
log.WithContext(ctx).Debugf("Registered proxy %s to cluster %s", proxyID, clusterAddr)
|
||||
|
||||
c.metrics.IncrementProxyConnectionCount(clusterAddr)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnregisterProxyFromCluster removes a proxy from a cluster.
|
||||
func (c *GRPCController) UnregisterProxyFromCluster(ctx context.Context, clusterAddr, proxyID string) error {
|
||||
if clusterAddr == "" {
|
||||
return nil
|
||||
}
|
||||
if proxySet, ok := c.clusterProxies.Load(clusterAddr); ok {
|
||||
proxySet.(*sync.Map).Delete(proxyID)
|
||||
log.WithContext(ctx).Debugf("Unregistered proxy %s from cluster %s", proxyID, clusterAddr)
|
||||
|
||||
c.metrics.DecrementProxyConnectionCount(clusterAddr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetProxiesForCluster returns all proxy IDs registered for a specific cluster.
|
||||
func (c *GRPCController) GetProxiesForCluster(clusterAddr string) []string {
|
||||
proxySet, ok := c.clusterProxies.Load(clusterAddr)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
var proxies []string
|
||||
proxySet.(*sync.Map).Range(func(key, _ interface{}) bool {
|
||||
proxies = append(proxies, key.(string))
|
||||
return true
|
||||
})
|
||||
return proxies
|
||||
}
|
||||
@@ -0,0 +1,115 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy"
|
||||
)
|
||||
|
||||
// store defines the interface for proxy persistence operations
type store interface {
	// SaveProxy upserts a proxy record.
	SaveProxy(ctx context.Context, p *proxy.Proxy) error
	// UpdateProxyHeartbeat refreshes the proxy's last-seen timestamp.
	UpdateProxyHeartbeat(ctx context.Context, proxyID string) error
	// GetActiveProxyClusterAddresses lists cluster addresses of active proxies.
	GetActiveProxyClusterAddresses(ctx context.Context) ([]string, error)
	// CleanupStaleProxies removes proxies silent for longer than inactivityDuration.
	CleanupStaleProxies(ctx context.Context, inactivityDuration time.Duration) error
}

// Manager handles all proxy operations
type Manager struct {
	store   store    // persistence backend for proxy records
	metrics *metrics // OTel counters for proxy activity
}

// NewManager creates a new proxy Manager.
// It registers the manager's metrics on the provided meter; a metric
// registration error is returned as-is.
func NewManager(store store, meter metric.Meter) (*Manager, error) {
	m, err := newMetrics(meter)
	if err != nil {
		return nil, err
	}

	return &Manager{
		store:   store,
		metrics: m,
	}, nil
}
|
||||
|
||||
// Connect registers a new proxy connection in the database
|
||||
func (m Manager) Connect(ctx context.Context, proxyID, clusterAddress, ipAddress string) error {
|
||||
now := time.Now()
|
||||
p := &proxy.Proxy{
|
||||
ID: proxyID,
|
||||
ClusterAddress: clusterAddress,
|
||||
IPAddress: ipAddress,
|
||||
LastSeen: now,
|
||||
ConnectedAt: &now,
|
||||
Status: "connected",
|
||||
}
|
||||
|
||||
if err := m.store.SaveProxy(ctx, p); err != nil {
|
||||
log.WithContext(ctx).Errorf("failed to register proxy %s: %v", proxyID, err)
|
||||
return err
|
||||
}
|
||||
|
||||
log.WithContext(ctx).WithFields(log.Fields{
|
||||
"proxyID": proxyID,
|
||||
"clusterAddress": clusterAddress,
|
||||
"ipAddress": ipAddress,
|
||||
}).Info("proxy connected")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Disconnect marks a proxy as disconnected in the database
|
||||
func (m Manager) Disconnect(ctx context.Context, proxyID string) error {
|
||||
now := time.Now()
|
||||
p := &proxy.Proxy{
|
||||
ID: proxyID,
|
||||
Status: "disconnected",
|
||||
DisconnectedAt: &now,
|
||||
LastSeen: now,
|
||||
}
|
||||
|
||||
if err := m.store.SaveProxy(ctx, p); err != nil {
|
||||
log.WithContext(ctx).Errorf("failed to disconnect proxy %s: %v", proxyID, err)
|
||||
return err
|
||||
}
|
||||
|
||||
log.WithContext(ctx).WithFields(log.Fields{
|
||||
"proxyID": proxyID,
|
||||
}).Info("proxy disconnected")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Heartbeat updates the proxy's last seen timestamp
|
||||
func (m Manager) Heartbeat(ctx context.Context, proxyID string) error {
|
||||
if err := m.store.UpdateProxyHeartbeat(ctx, proxyID); err != nil {
|
||||
log.WithContext(ctx).Debugf("failed to update proxy %s heartbeat: %v", proxyID, err)
|
||||
return err
|
||||
}
|
||||
m.metrics.IncrementProxyHeartbeatCount()
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetActiveClusterAddresses returns all unique cluster addresses for active proxies
|
||||
func (m Manager) GetActiveClusterAddresses(ctx context.Context) ([]string, error) {
|
||||
addresses, err := m.store.GetActiveProxyClusterAddresses(ctx)
|
||||
if err != nil {
|
||||
log.WithContext(ctx).Errorf("failed to get active proxy cluster addresses: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
return addresses, nil
|
||||
}
|
||||
|
||||
// CleanupStale removes proxies that haven't sent heartbeat in the specified duration
|
||||
func (m Manager) CleanupStale(ctx context.Context, inactivityDuration time.Duration) error {
|
||||
if err := m.store.CleanupStaleProxies(ctx, inactivityDuration); err != nil {
|
||||
log.WithContext(ctx).Errorf("failed to cleanup stale proxies: %v", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -0,0 +1,74 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
)
|
||||
|
||||
// metrics bundles the OpenTelemetry instruments used by the proxy manager
// and controller.
type metrics struct {
	proxyConnectionCount   metric.Int64UpDownCounter // gauge-like: current proxy connections per cluster
	serviceUpdateSendCount metric.Int64Counter       // total service updates sent, per cluster
	proxyHeartbeatCount    metric.Int64Counter       // total heartbeats received
}

// newMetrics registers all instruments on the given meter.
// It fails fast: the first instrument that cannot be created aborts
// construction and its error is returned unchanged.
func newMetrics(meter metric.Meter) (*metrics, error) {
	proxyConnectionCount, err := meter.Int64UpDownCounter(
		"management_proxy_connection_count",
		metric.WithDescription("Number of active proxy connections"),
		metric.WithUnit("{connection}"),
	)
	if err != nil {
		return nil, err
	}

	serviceUpdateSendCount, err := meter.Int64Counter(
		"management_proxy_service_update_send_count",
		metric.WithDescription("Total number of service updates sent to proxies"),
		metric.WithUnit("{update}"),
	)
	if err != nil {
		return nil, err
	}

	proxyHeartbeatCount, err := meter.Int64Counter(
		"management_proxy_heartbeat_count",
		metric.WithDescription("Total number of proxy heartbeats received"),
		metric.WithUnit("{heartbeat}"),
	)
	if err != nil {
		return nil, err
	}

	return &metrics{
		proxyConnectionCount:   proxyConnectionCount,
		serviceUpdateSendCount: serviceUpdateSendCount,
		proxyHeartbeatCount:    proxyHeartbeatCount,
	}, nil
}
|
||||
|
||||
func (m *metrics) IncrementProxyConnectionCount(clusterAddr string) {
|
||||
m.proxyConnectionCount.Add(context.Background(), 1,
|
||||
metric.WithAttributes(
|
||||
attribute.String("cluster", clusterAddr),
|
||||
))
|
||||
}
|
||||
|
||||
func (m *metrics) DecrementProxyConnectionCount(clusterAddr string) {
|
||||
m.proxyConnectionCount.Add(context.Background(), -1,
|
||||
metric.WithAttributes(
|
||||
attribute.String("cluster", clusterAddr),
|
||||
))
|
||||
}
|
||||
|
||||
func (m *metrics) IncrementServiceUpdateSendCount(clusterAddr string) {
|
||||
m.serviceUpdateSendCount.Add(context.Background(), 1,
|
||||
metric.WithAttributes(
|
||||
attribute.String("cluster", clusterAddr),
|
||||
))
|
||||
}
|
||||
|
||||
func (m *metrics) IncrementProxyHeartbeatCount() {
|
||||
m.proxyHeartbeatCount.Add(context.Background(), 1)
|
||||
}
|
||||
199
management/internals/modules/reverseproxy/proxy/manager_mock.go
Normal file
199
management/internals/modules/reverseproxy/proxy/manager_mock.go
Normal file
@@ -0,0 +1,199 @@
|
||||
// Code generated by MockGen. DO NOT EDIT.
|
||||
// Source: ./manager.go
|
||||
|
||||
// Package proxy is a generated GoMock package.
|
||||
package proxy
|
||||
|
||||
import (
|
||||
context "context"
|
||||
reflect "reflect"
|
||||
time "time"
|
||||
|
||||
gomock "github.com/golang/mock/gomock"
|
||||
proto "github.com/netbirdio/netbird/shared/management/proto"
|
||||
)
|
||||
|
||||
// MockManager is a mock of Manager interface.
|
||||
type MockManager struct {
|
||||
ctrl *gomock.Controller
|
||||
recorder *MockManagerMockRecorder
|
||||
}
|
||||
|
||||
// MockManagerMockRecorder is the mock recorder for MockManager.
|
||||
type MockManagerMockRecorder struct {
|
||||
mock *MockManager
|
||||
}
|
||||
|
||||
// NewMockManager creates a new mock instance.
|
||||
func NewMockManager(ctrl *gomock.Controller) *MockManager {
|
||||
mock := &MockManager{ctrl: ctrl}
|
||||
mock.recorder = &MockManagerMockRecorder{mock}
|
||||
return mock
|
||||
}
|
||||
|
||||
// EXPECT returns an object that allows the caller to indicate expected use.
|
||||
func (m *MockManager) EXPECT() *MockManagerMockRecorder {
|
||||
return m.recorder
|
||||
}
|
||||
|
||||
// CleanupStale mocks base method.
|
||||
func (m *MockManager) CleanupStale(ctx context.Context, inactivityDuration time.Duration) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "CleanupStale", ctx, inactivityDuration)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// CleanupStale indicates an expected call of CleanupStale.
|
||||
func (mr *MockManagerMockRecorder) CleanupStale(ctx, inactivityDuration interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupStale", reflect.TypeOf((*MockManager)(nil).CleanupStale), ctx, inactivityDuration)
|
||||
}
|
||||
|
||||
// Connect mocks base method.
|
||||
func (m *MockManager) Connect(ctx context.Context, proxyID, clusterAddress, ipAddress string) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "Connect", ctx, proxyID, clusterAddress, ipAddress)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// Connect indicates an expected call of Connect.
|
||||
func (mr *MockManagerMockRecorder) Connect(ctx, proxyID, clusterAddress, ipAddress interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Connect", reflect.TypeOf((*MockManager)(nil).Connect), ctx, proxyID, clusterAddress, ipAddress)
|
||||
}
|
||||
|
||||
// Disconnect mocks base method.
|
||||
func (m *MockManager) Disconnect(ctx context.Context, proxyID string) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "Disconnect", ctx, proxyID)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// Disconnect indicates an expected call of Disconnect.
|
||||
func (mr *MockManagerMockRecorder) Disconnect(ctx, proxyID interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Disconnect", reflect.TypeOf((*MockManager)(nil).Disconnect), ctx, proxyID)
|
||||
}
|
||||
|
||||
// GetActiveClusterAddresses mocks base method.
|
||||
func (m *MockManager) GetActiveClusterAddresses(ctx context.Context) ([]string, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetActiveClusterAddresses", ctx)
|
||||
ret0, _ := ret[0].([]string)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetActiveClusterAddresses indicates an expected call of GetActiveClusterAddresses.
|
||||
func (mr *MockManagerMockRecorder) GetActiveClusterAddresses(ctx interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetActiveClusterAddresses", reflect.TypeOf((*MockManager)(nil).GetActiveClusterAddresses), ctx)
|
||||
}
|
||||
|
||||
// Heartbeat mocks base method.
|
||||
func (m *MockManager) Heartbeat(ctx context.Context, proxyID string) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "Heartbeat", ctx, proxyID)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// Heartbeat indicates an expected call of Heartbeat.
|
||||
func (mr *MockManagerMockRecorder) Heartbeat(ctx, proxyID interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Heartbeat", reflect.TypeOf((*MockManager)(nil).Heartbeat), ctx, proxyID)
|
||||
}
|
||||
|
||||
// MockController is a mock of Controller interface.
|
||||
type MockController struct {
|
||||
ctrl *gomock.Controller
|
||||
recorder *MockControllerMockRecorder
|
||||
}
|
||||
|
||||
// MockControllerMockRecorder is the mock recorder for MockController.
|
||||
type MockControllerMockRecorder struct {
|
||||
mock *MockController
|
||||
}
|
||||
|
||||
// NewMockController creates a new mock instance.
|
||||
func NewMockController(ctrl *gomock.Controller) *MockController {
|
||||
mock := &MockController{ctrl: ctrl}
|
||||
mock.recorder = &MockControllerMockRecorder{mock}
|
||||
return mock
|
||||
}
|
||||
|
||||
// EXPECT returns an object that allows the caller to indicate expected use.
|
||||
func (m *MockController) EXPECT() *MockControllerMockRecorder {
|
||||
return m.recorder
|
||||
}
|
||||
|
||||
// GetOIDCValidationConfig mocks base method.
|
||||
func (m *MockController) GetOIDCValidationConfig() OIDCValidationConfig {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetOIDCValidationConfig")
|
||||
ret0, _ := ret[0].(OIDCValidationConfig)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// GetOIDCValidationConfig indicates an expected call of GetOIDCValidationConfig.
|
||||
func (mr *MockControllerMockRecorder) GetOIDCValidationConfig() *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOIDCValidationConfig", reflect.TypeOf((*MockController)(nil).GetOIDCValidationConfig))
|
||||
}
|
||||
|
||||
// GetProxiesForCluster mocks base method.
|
||||
func (m *MockController) GetProxiesForCluster(clusterAddr string) []string {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetProxiesForCluster", clusterAddr)
|
||||
ret0, _ := ret[0].([]string)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// GetProxiesForCluster indicates an expected call of GetProxiesForCluster.
|
||||
func (mr *MockControllerMockRecorder) GetProxiesForCluster(clusterAddr interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProxiesForCluster", reflect.TypeOf((*MockController)(nil).GetProxiesForCluster), clusterAddr)
|
||||
}
|
||||
|
||||
// RegisterProxyToCluster mocks base method.
|
||||
func (m *MockController) RegisterProxyToCluster(ctx context.Context, clusterAddr, proxyID string) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "RegisterProxyToCluster", ctx, clusterAddr, proxyID)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// RegisterProxyToCluster indicates an expected call of RegisterProxyToCluster.
|
||||
func (mr *MockControllerMockRecorder) RegisterProxyToCluster(ctx, clusterAddr, proxyID interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterProxyToCluster", reflect.TypeOf((*MockController)(nil).RegisterProxyToCluster), ctx, clusterAddr, proxyID)
|
||||
}
|
||||
|
||||
// SendServiceUpdateToCluster mocks base method.
|
||||
func (m *MockController) SendServiceUpdateToCluster(ctx context.Context, accountID string, update *proto.ProxyMapping, clusterAddr string) {
|
||||
m.ctrl.T.Helper()
|
||||
m.ctrl.Call(m, "SendServiceUpdateToCluster", ctx, accountID, update, clusterAddr)
|
||||
}
|
||||
|
||||
// SendServiceUpdateToCluster indicates an expected call of SendServiceUpdateToCluster.
|
||||
func (mr *MockControllerMockRecorder) SendServiceUpdateToCluster(ctx, accountID, update, clusterAddr interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendServiceUpdateToCluster", reflect.TypeOf((*MockController)(nil).SendServiceUpdateToCluster), ctx, accountID, update, clusterAddr)
|
||||
}
|
||||
|
||||
// UnregisterProxyFromCluster mocks base method.
|
||||
func (m *MockController) UnregisterProxyFromCluster(ctx context.Context, clusterAddr, proxyID string) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "UnregisterProxyFromCluster", ctx, clusterAddr, proxyID)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// UnregisterProxyFromCluster indicates an expected call of UnregisterProxyFromCluster.
|
||||
func (mr *MockControllerMockRecorder) UnregisterProxyFromCluster(ctx, clusterAddr, proxyID interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnregisterProxyFromCluster", reflect.TypeOf((*MockController)(nil).UnregisterProxyFromCluster), ctx, clusterAddr, proxyID)
|
||||
}
|
||||
20
management/internals/modules/reverseproxy/proxy/proxy.go
Normal file
20
management/internals/modules/reverseproxy/proxy/proxy.go
Normal file
@@ -0,0 +1,20 @@
|
||||
package proxy
|
||||
|
||||
import "time"
|
||||
|
||||
// Proxy represents a reverse proxy instance
|
||||
type Proxy struct {
|
||||
ID string `gorm:"primaryKey;type:varchar(255)"`
|
||||
ClusterAddress string `gorm:"type:varchar(255);not null;index:idx_proxy_cluster_status"`
|
||||
IPAddress string `gorm:"type:varchar(45)"`
|
||||
LastSeen time.Time `gorm:"not null;index:idx_proxy_last_seen"`
|
||||
ConnectedAt *time.Time
|
||||
DisconnectedAt *time.Time
|
||||
Status string `gorm:"type:varchar(20);not null;index:idx_proxy_cluster_status"`
|
||||
CreatedAt time.Time
|
||||
UpdatedAt time.Time
|
||||
}
|
||||
|
||||
func (Proxy) TableName() string {
|
||||
return "proxies"
|
||||
}
|
||||
@@ -1,6 +1,6 @@
|
||||
package reverseproxy
|
||||
package service
|
||||
|
||||
//go:generate go run github.com/golang/mock/mockgen -package reverseproxy -destination=interface_mock.go -source=./interface.go -build_flags=-mod=mod
|
||||
//go:generate go run github.com/golang/mock/mockgen -package service -destination=interface_mock.go -source=./interface.go -build_flags=-mod=mod
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -14,7 +14,7 @@ type Manager interface {
|
||||
DeleteService(ctx context.Context, accountID, userID, serviceID string) error
|
||||
DeleteAllServices(ctx context.Context, accountID, userID string) error
|
||||
SetCertificateIssuedAt(ctx context.Context, accountID, serviceID string) error
|
||||
SetStatus(ctx context.Context, accountID, serviceID string, status ProxyStatus) error
|
||||
SetStatus(ctx context.Context, accountID, serviceID string, status Status) error
|
||||
ReloadAllServicesForAccount(ctx context.Context, accountID string) error
|
||||
ReloadService(ctx context.Context, accountID, serviceID string) error
|
||||
GetGlobalServices(ctx context.Context) ([]*Service, error)
|
||||
@@ -1,8 +1,8 @@
|
||||
// Code generated by MockGen. DO NOT EDIT.
|
||||
// Source: ./interface.go
|
||||
|
||||
// Package reverseproxy is a generated GoMock package.
|
||||
package reverseproxy
|
||||
// Package service is a generated GoMock package.
|
||||
package service
|
||||
|
||||
import (
|
||||
context "context"
|
||||
@@ -239,7 +239,7 @@ func (mr *MockManagerMockRecorder) SetCertificateIssuedAt(ctx, accountID, servic
|
||||
}
|
||||
|
||||
// SetStatus mocks base method.
|
||||
func (m *MockManager) SetStatus(ctx context.Context, accountID, serviceID string, status ProxyStatus) error {
|
||||
func (m *MockManager) SetStatus(ctx context.Context, accountID, serviceID string, status Status) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "SetStatus", ctx, accountID, serviceID, status)
|
||||
ret0, _ := ret[0].(error)
|
||||
@@ -6,10 +6,10 @@ import (
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs"
|
||||
accesslogsmanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs/manager"
|
||||
domainmanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain/manager"
|
||||
rpservice "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service"
|
||||
nbcontext "github.com/netbirdio/netbird/management/server/context"
|
||||
"github.com/netbirdio/netbird/shared/management/http/api"
|
||||
"github.com/netbirdio/netbird/shared/management/http/util"
|
||||
@@ -17,11 +17,11 @@ import (
|
||||
)
|
||||
|
||||
type handler struct {
|
||||
manager reverseproxy.Manager
|
||||
manager rpservice.Manager
|
||||
}
|
||||
|
||||
// RegisterEndpoints registers all service HTTP endpoints.
|
||||
func RegisterEndpoints(manager reverseproxy.Manager, domainManager domainmanager.Manager, accessLogsManager accesslogs.Manager, router *mux.Router) {
|
||||
func RegisterEndpoints(manager rpservice.Manager, domainManager domainmanager.Manager, accessLogsManager accesslogs.Manager, router *mux.Router) {
|
||||
h := &handler{
|
||||
manager: manager,
|
||||
}
|
||||
@@ -72,8 +72,11 @@ func (h *handler) createService(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
service := new(reverseproxy.Service)
|
||||
service.FromAPIRequest(&req, userAuth.AccountId)
|
||||
service := new(rpservice.Service)
|
||||
if err = service.FromAPIRequest(&req, userAuth.AccountId); err != nil {
|
||||
util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "%s", err.Error()), w)
|
||||
return
|
||||
}
|
||||
|
||||
if err = service.Validate(); err != nil {
|
||||
util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "%s", err.Error()), w)
|
||||
@@ -130,9 +133,12 @@ func (h *handler) updateService(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
service := new(reverseproxy.Service)
|
||||
service := new(rpservice.Service)
|
||||
service.ID = serviceID
|
||||
service.FromAPIRequest(&req, userAuth.AccountId)
|
||||
if err = service.FromAPIRequest(&req, userAuth.AccountId); err != nil {
|
||||
util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "%s", err.Error()), w)
|
||||
return
|
||||
}
|
||||
|
||||
if err = service.Validate(); err != nil {
|
||||
util.WriteError(r.Context(), status.Errorf(status.InvalidArgument, "%s", err.Error()), w)
|
||||
@@ -0,0 +1,65 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/rand/v2"
|
||||
"time"
|
||||
|
||||
"github.com/netbirdio/netbird/shared/management/status"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
exposeTTL = 90 * time.Second
|
||||
exposeReapInterval = 30 * time.Second
|
||||
maxExposesPerPeer = 10
|
||||
exposeReapBatch = 100
|
||||
)
|
||||
|
||||
type exposeReaper struct {
|
||||
manager *Manager
|
||||
}
|
||||
|
||||
// StartExposeReaper starts a background goroutine that reaps expired ephemeral services from the DB.
|
||||
func (r *exposeReaper) StartExposeReaper(ctx context.Context) {
|
||||
go func() {
|
||||
// start with a random delay
|
||||
rn := rand.IntN(10)
|
||||
time.Sleep(time.Duration(rn) * time.Second)
|
||||
|
||||
ticker := time.NewTicker(exposeReapInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
r.reapExpiredExposes(ctx)
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (r *exposeReaper) reapExpiredExposes(ctx context.Context) {
|
||||
expired, err := r.manager.store.GetExpiredEphemeralServices(ctx, exposeTTL, exposeReapBatch)
|
||||
if err != nil {
|
||||
log.Errorf("failed to get expired ephemeral services: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
for _, svc := range expired {
|
||||
log.Infof("reaping expired expose session for peer %s, domain %s", svc.SourcePeer, svc.Domain)
|
||||
|
||||
err := r.manager.deleteExpiredPeerService(ctx, svc.AccountID, svc.SourcePeer, svc.ID)
|
||||
if err == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if s, ok := status.FromError(err); ok && s.ErrorType == status.NotFound {
|
||||
log.Debugf("service %s was already deleted by another instance", svc.Domain)
|
||||
} else {
|
||||
log.Errorf("failed to delete expired peer-exposed service for domain %s: %v", svc.Domain, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,208 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
rpservice "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service"
|
||||
"github.com/netbirdio/netbird/management/server/store"
|
||||
)
|
||||
|
||||
func TestReapExpiredExposes(t *testing.T) {
|
||||
mgr, testStore := setupIntegrationTest(t)
|
||||
ctx := context.Background()
|
||||
|
||||
resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{
|
||||
Port: 8080,
|
||||
Protocol: "http",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Manually expire the service by backdating meta_last_renewed_at
|
||||
expireEphemeralService(t, testStore, testAccountID, resp.Domain)
|
||||
|
||||
// Create a non-expired service
|
||||
resp2, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{
|
||||
Port: 8081,
|
||||
Protocol: "http",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
mgr.exposeReaper.reapExpiredExposes(ctx)
|
||||
|
||||
// Expired service should be deleted
|
||||
_, err = testStore.GetServiceByDomain(ctx, resp.Domain)
|
||||
require.Error(t, err, "expired service should be deleted")
|
||||
|
||||
// Non-expired service should remain
|
||||
_, err = testStore.GetServiceByDomain(ctx, resp2.Domain)
|
||||
require.NoError(t, err, "active service should remain")
|
||||
}
|
||||
|
||||
func TestReapAlreadyDeletedService(t *testing.T) {
|
||||
mgr, testStore := setupIntegrationTest(t)
|
||||
ctx := context.Background()
|
||||
|
||||
resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{
|
||||
Port: 8080,
|
||||
Protocol: "http",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
expireEphemeralService(t, testStore, testAccountID, resp.Domain)
|
||||
|
||||
// Delete the service before reaping
|
||||
err = mgr.StopServiceFromPeer(ctx, testAccountID, testPeerID, resp.Domain)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Reaping should handle the already-deleted service gracefully
|
||||
mgr.exposeReaper.reapExpiredExposes(ctx)
|
||||
}
|
||||
|
||||
func TestConcurrentReapAndRenew(t *testing.T) {
|
||||
mgr, testStore := setupIntegrationTest(t)
|
||||
ctx := context.Background()
|
||||
|
||||
for i := range 5 {
|
||||
_, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{
|
||||
Port: 8080 + i,
|
||||
Protocol: "http",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Expire all services
|
||||
services, err := testStore.GetAccountServices(ctx, store.LockingStrengthNone, testAccountID)
|
||||
require.NoError(t, err)
|
||||
for _, svc := range services {
|
||||
if svc.Source == rpservice.SourceEphemeral {
|
||||
expireEphemeralService(t, testStore, testAccountID, svc.Domain)
|
||||
}
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(2)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
mgr.exposeReaper.reapExpiredExposes(ctx)
|
||||
}()
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
_, _ = mgr.store.CountEphemeralServicesByPeer(ctx, store.LockingStrengthNone, testAccountID, testPeerID)
|
||||
}()
|
||||
wg.Wait()
|
||||
|
||||
count, err := mgr.store.CountEphemeralServicesByPeer(ctx, store.LockingStrengthNone, testAccountID, testPeerID)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(0), count, "all expired services should be reaped")
|
||||
}
|
||||
|
||||
func TestRenewEphemeralService(t *testing.T) {
|
||||
mgr, _ := setupIntegrationTest(t)
|
||||
ctx := context.Background()
|
||||
|
||||
t.Run("renew succeeds for active service", func(t *testing.T) {
|
||||
resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{
|
||||
Port: 8082,
|
||||
Protocol: "http",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
err = mgr.RenewServiceFromPeer(ctx, testAccountID, testPeerID, resp.Domain)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("renew fails for nonexistent domain", func(t *testing.T) {
|
||||
err := mgr.RenewServiceFromPeer(ctx, testAccountID, testPeerID, "nonexistent.com")
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "no active expose session")
|
||||
})
|
||||
}
|
||||
|
||||
func TestCountAndExistsEphemeralServices(t *testing.T) {
|
||||
mgr, _ := setupIntegrationTest(t)
|
||||
ctx := context.Background()
|
||||
|
||||
count, err := mgr.store.CountEphemeralServicesByPeer(ctx, store.LockingStrengthNone, testAccountID, testPeerID)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(0), count)
|
||||
|
||||
resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{
|
||||
Port: 8083,
|
||||
Protocol: "http",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
count, err = mgr.store.CountEphemeralServicesByPeer(ctx, store.LockingStrengthNone, testAccountID, testPeerID)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(1), count)
|
||||
|
||||
exists, err := mgr.store.EphemeralServiceExists(ctx, store.LockingStrengthNone, testAccountID, testPeerID, resp.Domain)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, exists, "service should exist")
|
||||
|
||||
exists, err = mgr.store.EphemeralServiceExists(ctx, store.LockingStrengthNone, testAccountID, testPeerID, "no-such.domain")
|
||||
require.NoError(t, err)
|
||||
assert.False(t, exists, "non-existent service should not exist")
|
||||
}
|
||||
|
||||
func TestMaxExposesPerPeerEnforced(t *testing.T) {
|
||||
mgr, _ := setupIntegrationTest(t)
|
||||
ctx := context.Background()
|
||||
|
||||
for i := range maxExposesPerPeer {
|
||||
_, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{
|
||||
Port: 8090 + i,
|
||||
Protocol: "http",
|
||||
})
|
||||
require.NoError(t, err, "expose %d should succeed", i)
|
||||
}
|
||||
|
||||
_, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{
|
||||
Port: 9999,
|
||||
Protocol: "http",
|
||||
})
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "maximum number of active expose sessions")
|
||||
}
|
||||
|
||||
func TestReapSkipsRenewedService(t *testing.T) {
|
||||
mgr, testStore := setupIntegrationTest(t)
|
||||
ctx := context.Background()
|
||||
|
||||
resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{
|
||||
Port: 8086,
|
||||
Protocol: "http",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Expire the service
|
||||
expireEphemeralService(t, testStore, testAccountID, resp.Domain)
|
||||
|
||||
// Renew it before the reaper runs
|
||||
err = mgr.RenewServiceFromPeer(ctx, testAccountID, testPeerID, resp.Domain)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Reaper should skip it because the re-check sees a fresh timestamp
|
||||
mgr.exposeReaper.reapExpiredExposes(ctx)
|
||||
|
||||
_, err = testStore.GetServiceByDomain(ctx, resp.Domain)
|
||||
require.NoError(t, err, "renewed service should survive reaping")
|
||||
}
|
||||
|
||||
// expireEphemeralService backdates meta_last_renewed_at to force expiration.
|
||||
func expireEphemeralService(t *testing.T, s store.Store, accountID, domain string) {
|
||||
t.Helper()
|
||||
svc, err := s.GetServiceByDomain(context.Background(), domain)
|
||||
require.NoError(t, err)
|
||||
|
||||
expired := time.Now().Add(-2 * exposeTTL)
|
||||
svc.Meta.LastRenewedAt = &expired
|
||||
err = s.UpdateService(context.Background(), svc)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
@@ -4,24 +4,22 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand/v2"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
nbpeer "github.com/netbirdio/netbird/management/server/peer"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"slices"
|
||||
nbpeer "github.com/netbirdio/netbird/management/server/peer"
|
||||
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/sessionkey"
|
||||
nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc"
|
||||
"github.com/netbirdio/netbird/management/server/account"
|
||||
"github.com/netbirdio/netbird/management/server/activity"
|
||||
"github.com/netbirdio/netbird/management/server/permissions"
|
||||
"github.com/netbirdio/netbird/management/server/permissions/modules"
|
||||
"github.com/netbirdio/netbird/management/server/permissions/operations"
|
||||
"github.com/netbirdio/netbird/management/server/settings"
|
||||
"github.com/netbirdio/netbird/management/server/store"
|
||||
"github.com/netbirdio/netbird/shared/management/proto"
|
||||
"github.com/netbirdio/netbird/shared/management/status"
|
||||
)
|
||||
|
||||
@@ -33,36 +31,34 @@ type ClusterDeriver interface {
|
||||
GetClusterDomains() []string
|
||||
}
|
||||
|
||||
type managerImpl struct {
|
||||
type Manager struct {
|
||||
store store.Store
|
||||
accountManager account.Manager
|
||||
permissionsManager permissions.Manager
|
||||
settingsManager settings.Manager
|
||||
proxyGRPCServer *nbgrpc.ProxyServiceServer
|
||||
proxyController proxy.Controller
|
||||
clusterDeriver ClusterDeriver
|
||||
exposeTracker *exposeTracker
|
||||
exposeReaper *exposeReaper
|
||||
}
|
||||
|
||||
// NewManager creates a new service manager.
|
||||
func NewManager(store store.Store, accountManager account.Manager, permissionsManager permissions.Manager, settingsManager settings.Manager, proxyGRPCServer *nbgrpc.ProxyServiceServer, clusterDeriver ClusterDeriver) reverseproxy.Manager {
|
||||
mgr := &managerImpl{
|
||||
func NewManager(store store.Store, accountManager account.Manager, permissionsManager permissions.Manager, proxyController proxy.Controller, clusterDeriver ClusterDeriver) *Manager {
|
||||
mgr := &Manager{
|
||||
store: store,
|
||||
accountManager: accountManager,
|
||||
permissionsManager: permissionsManager,
|
||||
settingsManager: settingsManager,
|
||||
proxyGRPCServer: proxyGRPCServer,
|
||||
proxyController: proxyController,
|
||||
clusterDeriver: clusterDeriver,
|
||||
}
|
||||
mgr.exposeTracker = &exposeTracker{manager: mgr}
|
||||
mgr.exposeReaper = &exposeReaper{manager: mgr}
|
||||
return mgr
|
||||
}
|
||||
|
||||
// StartExposeReaper delegates to the expose tracker.
|
||||
func (m *managerImpl) StartExposeReaper(ctx context.Context) {
|
||||
m.exposeTracker.StartExposeReaper(ctx)
|
||||
// StartExposeReaper starts the background goroutine that reaps expired ephemeral services.
|
||||
func (m *Manager) StartExposeReaper(ctx context.Context) {
|
||||
m.exposeReaper.StartExposeReaper(ctx)
|
||||
}
|
||||
|
||||
func (m *managerImpl) GetAllServices(ctx context.Context, accountID, userID string) ([]*reverseproxy.Service, error) {
|
||||
func (m *Manager) GetAllServices(ctx context.Context, accountID, userID string) ([]*service.Service, error) {
|
||||
ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Services, operations.Read)
|
||||
if err != nil {
|
||||
return nil, status.NewPermissionValidationError(err)
|
||||
@@ -86,34 +82,34 @@ func (m *managerImpl) GetAllServices(ctx context.Context, accountID, userID stri
|
||||
return services, nil
|
||||
}
|
||||
|
||||
func (m *managerImpl) replaceHostByLookup(ctx context.Context, accountID string, service *reverseproxy.Service) error {
|
||||
for _, target := range service.Targets {
|
||||
func (m *Manager) replaceHostByLookup(ctx context.Context, accountID string, s *service.Service) error {
|
||||
for _, target := range s.Targets {
|
||||
switch target.TargetType {
|
||||
case reverseproxy.TargetTypePeer:
|
||||
case service.TargetTypePeer:
|
||||
peer, err := m.store.GetPeerByID(ctx, store.LockingStrengthNone, accountID, target.TargetId)
|
||||
if err != nil {
|
||||
log.WithContext(ctx).Warnf("failed to get peer by id %s for service %s: %v", target.TargetId, service.ID, err)
|
||||
log.WithContext(ctx).Warnf("failed to get peer by id %s for service %s: %v", target.TargetId, s.ID, err)
|
||||
target.Host = unknownHostPlaceholder
|
||||
continue
|
||||
}
|
||||
target.Host = peer.IP.String()
|
||||
case reverseproxy.TargetTypeHost:
|
||||
case service.TargetTypeHost:
|
||||
resource, err := m.store.GetNetworkResourceByID(ctx, store.LockingStrengthNone, accountID, target.TargetId)
|
||||
if err != nil {
|
||||
log.WithContext(ctx).Warnf("failed to get resource by id %s for service %s: %v", target.TargetId, service.ID, err)
|
||||
log.WithContext(ctx).Warnf("failed to get resource by id %s for service %s: %v", target.TargetId, s.ID, err)
|
||||
target.Host = unknownHostPlaceholder
|
||||
continue
|
||||
}
|
||||
target.Host = resource.Prefix.Addr().String()
|
||||
case reverseproxy.TargetTypeDomain:
|
||||
case service.TargetTypeDomain:
|
||||
resource, err := m.store.GetNetworkResourceByID(ctx, store.LockingStrengthNone, accountID, target.TargetId)
|
||||
if err != nil {
|
||||
log.WithContext(ctx).Warnf("failed to get resource by id %s for service %s: %v", target.TargetId, service.ID, err)
|
||||
log.WithContext(ctx).Warnf("failed to get resource by id %s for service %s: %v", target.TargetId, s.ID, err)
|
||||
target.Host = unknownHostPlaceholder
|
||||
continue
|
||||
}
|
||||
target.Host = resource.Domain
|
||||
case reverseproxy.TargetTypeSubnet:
|
||||
case service.TargetTypeSubnet:
|
||||
// For subnets we do not do any lookups on the resource
|
||||
default:
|
||||
return fmt.Errorf("unknown target type: %s", target.TargetType)
|
||||
@@ -122,7 +118,7 @@ func (m *managerImpl) replaceHostByLookup(ctx context.Context, accountID string,
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *managerImpl) GetService(ctx context.Context, accountID, userID, serviceID string) (*reverseproxy.Service, error) {
|
||||
func (m *Manager) GetService(ctx context.Context, accountID, userID, serviceID string) (*service.Service, error) {
|
||||
ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Services, operations.Read)
|
||||
if err != nil {
|
||||
return nil, status.NewPermissionValidationError(err)
|
||||
@@ -143,7 +139,7 @@ func (m *managerImpl) GetService(ctx context.Context, accountID, userID, service
|
||||
return service, nil
|
||||
}
|
||||
|
||||
func (m *managerImpl) CreateService(ctx context.Context, accountID, userID string, service *reverseproxy.Service) (*reverseproxy.Service, error) {
|
||||
func (m *Manager) CreateService(ctx context.Context, accountID, userID string, s *service.Service) (*service.Service, error) {
|
||||
ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Services, operations.Create)
|
||||
if err != nil {
|
||||
return nil, status.NewPermissionValidationError(err)
|
||||
@@ -152,29 +148,29 @@ func (m *managerImpl) CreateService(ctx context.Context, accountID, userID strin
|
||||
return nil, status.NewPermissionDeniedError()
|
||||
}
|
||||
|
||||
if err := m.initializeServiceForCreate(ctx, accountID, service); err != nil {
|
||||
if err := m.initializeServiceForCreate(ctx, accountID, s); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := m.persistNewService(ctx, accountID, service); err != nil {
|
||||
if err := m.persistNewService(ctx, accountID, s); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m.accountManager.StoreEvent(ctx, userID, service.ID, accountID, activity.ServiceCreated, service.EventMeta())
|
||||
m.accountManager.StoreEvent(ctx, userID, s.ID, accountID, activity.ServiceCreated, s.EventMeta())
|
||||
|
||||
err = m.replaceHostByLookup(ctx, accountID, service)
|
||||
err = m.replaceHostByLookup(ctx, accountID, s)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to replace host by lookup for service %s: %w", service.ID, err)
|
||||
return nil, fmt.Errorf("failed to replace host by lookup for service %s: %w", s.ID, err)
|
||||
}
|
||||
|
||||
m.sendServiceUpdate(service, reverseproxy.Create, service.ProxyCluster, "")
|
||||
m.proxyController.SendServiceUpdateToCluster(ctx, accountID, s.ToProtoMapping(service.Create, "", m.proxyController.GetOIDCValidationConfig()), s.ProxyCluster)
|
||||
|
||||
m.accountManager.UpdateAccountPeers(ctx, accountID)
|
||||
|
||||
return service, nil
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func (m *managerImpl) initializeServiceForCreate(ctx context.Context, accountID string, service *reverseproxy.Service) error {
|
||||
func (m *Manager) initializeServiceForCreate(ctx context.Context, accountID string, service *service.Service) error {
|
||||
if m.clusterDeriver != nil {
|
||||
proxyCluster, err := m.clusterDeriver.DeriveClusterFromDomain(ctx, accountID, service.Domain)
|
||||
if err != nil {
|
||||
@@ -201,9 +197,9 @@ func (m *managerImpl) initializeServiceForCreate(ctx context.Context, accountID
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *managerImpl) persistNewService(ctx context.Context, accountID string, service *reverseproxy.Service) error {
|
||||
func (m *Manager) persistNewService(ctx context.Context, accountID string, service *service.Service) error {
|
||||
return m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error {
|
||||
if err := m.checkDomainAvailable(ctx, transaction, accountID, service.Domain, ""); err != nil {
|
||||
if err := m.checkDomainAvailable(ctx, transaction, service.Domain, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -219,8 +215,54 @@ func (m *managerImpl) persistNewService(ctx context.Context, accountID string, s
|
||||
})
|
||||
}
|
||||
|
||||
func (m *managerImpl) checkDomainAvailable(ctx context.Context, transaction store.Store, accountID, domain, excludeServiceID string) error {
|
||||
existingService, err := transaction.GetServiceByDomain(ctx, accountID, domain)
|
||||
// persistNewEphemeralService creates an ephemeral service inside a single transaction
|
||||
// that also enforces the duplicate and per-peer limit checks atomically.
|
||||
// The count and exists queries use FOR UPDATE locking to serialize concurrent creates
|
||||
// for the same peer, preventing the per-peer limit from being bypassed.
|
||||
func (m *Manager) persistNewEphemeralService(ctx context.Context, accountID, peerID string, svc *service.Service) error {
|
||||
return m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error {
|
||||
// Lock the peer row to serialize concurrent creates for the same peer.
|
||||
// Without this, when no ephemeral rows exist yet, FOR UPDATE on the services
|
||||
// table returns no rows and acquires no locks, allowing concurrent inserts
|
||||
// to bypass the per-peer limit.
|
||||
if _, err := transaction.GetPeerByID(ctx, store.LockingStrengthUpdate, accountID, peerID); err != nil {
|
||||
return fmt.Errorf("lock peer row: %w", err)
|
||||
}
|
||||
|
||||
exists, err := transaction.EphemeralServiceExists(ctx, store.LockingStrengthUpdate, accountID, peerID, svc.Domain)
|
||||
if err != nil {
|
||||
return fmt.Errorf("check existing expose: %w", err)
|
||||
}
|
||||
if exists {
|
||||
return status.Errorf(status.AlreadyExists, "peer already has an active expose session for this domain")
|
||||
}
|
||||
|
||||
count, err := transaction.CountEphemeralServicesByPeer(ctx, store.LockingStrengthUpdate, accountID, peerID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("count peer exposes: %w", err)
|
||||
}
|
||||
if count >= int64(maxExposesPerPeer) {
|
||||
return status.Errorf(status.PreconditionFailed, "peer has reached the maximum number of active expose sessions (%d)", maxExposesPerPeer)
|
||||
}
|
||||
|
||||
if err := m.checkDomainAvailable(ctx, transaction, svc.Domain, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := validateTargetReferences(ctx, transaction, accountID, svc.Targets); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := transaction.CreateService(ctx, svc); err != nil {
|
||||
return fmt.Errorf("create service: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (m *Manager) checkDomainAvailable(ctx context.Context, transaction store.Store, domain, excludeServiceID string) error {
|
||||
existingService, err := transaction.GetServiceByDomain(ctx, domain)
|
||||
if err != nil {
|
||||
if sErr, ok := status.FromError(err); !ok || sErr.Type() != status.NotFound {
|
||||
return fmt.Errorf("failed to check existing service: %w", err)
|
||||
@@ -229,13 +271,13 @@ func (m *managerImpl) checkDomainAvailable(ctx context.Context, transaction stor
|
||||
}
|
||||
|
||||
if existingService != nil && existingService.ID != excludeServiceID {
|
||||
return status.Errorf(status.AlreadyExists, "service with domain %s already exists", domain)
|
||||
return status.Errorf(status.AlreadyExists, "domain already taken")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *managerImpl) UpdateService(ctx context.Context, accountID, userID string, service *reverseproxy.Service) (*reverseproxy.Service, error) {
|
||||
func (m *Manager) UpdateService(ctx context.Context, accountID, userID string, service *service.Service) (*service.Service, error) {
|
||||
ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Services, operations.Update)
|
||||
if err != nil {
|
||||
return nil, status.NewPermissionValidationError(err)
|
||||
@@ -259,7 +301,7 @@ func (m *managerImpl) UpdateService(ctx context.Context, accountID, userID strin
|
||||
return nil, fmt.Errorf("failed to replace host by lookup for service %s: %w", service.ID, err)
|
||||
}
|
||||
|
||||
m.sendServiceUpdateNotifications(service, updateInfo)
|
||||
m.sendServiceUpdateNotifications(ctx, accountID, service, updateInfo)
|
||||
m.accountManager.UpdateAccountPeers(ctx, accountID)
|
||||
|
||||
return service, nil
|
||||
@@ -271,7 +313,7 @@ type serviceUpdateInfo struct {
|
||||
serviceEnabledChanged bool
|
||||
}
|
||||
|
||||
func (m *managerImpl) persistServiceUpdate(ctx context.Context, accountID string, service *reverseproxy.Service) (*serviceUpdateInfo, error) {
|
||||
func (m *Manager) persistServiceUpdate(ctx context.Context, accountID string, service *service.Service) (*serviceUpdateInfo, error) {
|
||||
var updateInfo serviceUpdateInfo
|
||||
|
||||
err := m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error {
|
||||
@@ -309,8 +351,8 @@ func (m *managerImpl) persistServiceUpdate(ctx context.Context, accountID string
|
||||
return &updateInfo, err
|
||||
}
|
||||
|
||||
func (m *managerImpl) handleDomainChange(ctx context.Context, transaction store.Store, accountID string, service *reverseproxy.Service) error {
|
||||
if err := m.checkDomainAvailable(ctx, transaction, accountID, service.Domain, service.ID); err != nil {
|
||||
func (m *Manager) handleDomainChange(ctx context.Context, transaction store.Store, accountID string, service *service.Service) error {
|
||||
if err := m.checkDomainAvailable(ctx, transaction, service.Domain, service.ID); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -326,7 +368,7 @@ func (m *managerImpl) handleDomainChange(ctx context.Context, transaction store.
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *managerImpl) preserveExistingAuthSecrets(service, existingService *reverseproxy.Service) {
|
||||
func (m *Manager) preserveExistingAuthSecrets(service, existingService *service.Service) {
|
||||
if service.Auth.PasswordAuth != nil && service.Auth.PasswordAuth.Enabled &&
|
||||
existingService.Auth.PasswordAuth != nil && existingService.Auth.PasswordAuth.Enabled &&
|
||||
service.Auth.PasswordAuth.Password == "" {
|
||||
@@ -340,54 +382,40 @@ func (m *managerImpl) preserveExistingAuthSecrets(service, existingService *reve
|
||||
}
|
||||
}
|
||||
|
||||
func (m *managerImpl) preserveServiceMetadata(service, existingService *reverseproxy.Service) {
|
||||
func (m *Manager) preserveServiceMetadata(service, existingService *service.Service) {
|
||||
service.Meta = existingService.Meta
|
||||
service.SessionPrivateKey = existingService.SessionPrivateKey
|
||||
service.SessionPublicKey = existingService.SessionPublicKey
|
||||
}
|
||||
|
||||
func (m *managerImpl) sendServiceUpdateNotifications(service *reverseproxy.Service, updateInfo *serviceUpdateInfo) {
|
||||
func (m *Manager) sendServiceUpdateNotifications(ctx context.Context, accountID string, s *service.Service, updateInfo *serviceUpdateInfo) {
|
||||
oidcCfg := m.proxyController.GetOIDCValidationConfig()
|
||||
|
||||
switch {
|
||||
case updateInfo.domainChanged && updateInfo.oldCluster != service.ProxyCluster:
|
||||
m.sendServiceUpdate(service, reverseproxy.Delete, updateInfo.oldCluster, "")
|
||||
m.sendServiceUpdate(service, reverseproxy.Create, service.ProxyCluster, "")
|
||||
case !service.Enabled && updateInfo.serviceEnabledChanged:
|
||||
m.sendServiceUpdate(service, reverseproxy.Delete, service.ProxyCluster, "")
|
||||
case service.Enabled && updateInfo.serviceEnabledChanged:
|
||||
m.sendServiceUpdate(service, reverseproxy.Create, service.ProxyCluster, "")
|
||||
case updateInfo.domainChanged && updateInfo.oldCluster != s.ProxyCluster:
|
||||
m.proxyController.SendServiceUpdateToCluster(ctx, accountID, s.ToProtoMapping(service.Delete, "", oidcCfg), updateInfo.oldCluster)
|
||||
m.proxyController.SendServiceUpdateToCluster(ctx, accountID, s.ToProtoMapping(service.Create, "", oidcCfg), s.ProxyCluster)
|
||||
case !s.Enabled && updateInfo.serviceEnabledChanged:
|
||||
m.proxyController.SendServiceUpdateToCluster(ctx, accountID, s.ToProtoMapping(service.Delete, "", oidcCfg), s.ProxyCluster)
|
||||
case s.Enabled && updateInfo.serviceEnabledChanged:
|
||||
m.proxyController.SendServiceUpdateToCluster(ctx, accountID, s.ToProtoMapping(service.Create, "", oidcCfg), s.ProxyCluster)
|
||||
default:
|
||||
m.sendServiceUpdate(service, reverseproxy.Update, service.ProxyCluster, "")
|
||||
m.proxyController.SendServiceUpdateToCluster(ctx, accountID, s.ToProtoMapping(service.Update, "", oidcCfg), s.ProxyCluster)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *managerImpl) sendServiceUpdate(service *reverseproxy.Service, operation reverseproxy.Operation, cluster, oldService string) {
|
||||
oidcCfg := m.proxyGRPCServer.GetOIDCValidationConfig()
|
||||
mapping := service.ToProtoMapping(operation, oldService, oidcCfg)
|
||||
m.sendMappingsToCluster([]*proto.ProxyMapping{mapping}, cluster)
|
||||
}
|
||||
|
||||
func (m *managerImpl) sendMappingsToCluster(mappings []*proto.ProxyMapping, cluster string) {
|
||||
if len(mappings) == 0 {
|
||||
return
|
||||
}
|
||||
update := &proto.GetMappingUpdateResponse{
|
||||
Mapping: mappings,
|
||||
}
|
||||
m.proxyGRPCServer.SendServiceUpdateToCluster(update, cluster)
|
||||
}
|
||||
|
||||
// validateTargetReferences checks that all target IDs reference existing peers or resources in the account.
|
||||
func validateTargetReferences(ctx context.Context, transaction store.Store, accountID string, targets []*reverseproxy.Target) error {
|
||||
func validateTargetReferences(ctx context.Context, transaction store.Store, accountID string, targets []*service.Target) error {
|
||||
for _, target := range targets {
|
||||
switch target.TargetType {
|
||||
case reverseproxy.TargetTypePeer:
|
||||
case service.TargetTypePeer:
|
||||
if _, err := transaction.GetPeerByID(ctx, store.LockingStrengthShare, accountID, target.TargetId); err != nil {
|
||||
if sErr, ok := status.FromError(err); ok && sErr.Type() == status.NotFound {
|
||||
return status.Errorf(status.InvalidArgument, "peer target %q not found in account", target.TargetId)
|
||||
}
|
||||
return fmt.Errorf("look up peer target %q: %w", target.TargetId, err)
|
||||
}
|
||||
case reverseproxy.TargetTypeHost, reverseproxy.TargetTypeSubnet, reverseproxy.TargetTypeDomain:
|
||||
case service.TargetTypeHost, service.TargetTypeSubnet, service.TargetTypeDomain:
|
||||
if _, err := transaction.GetNetworkResourceByID(ctx, store.LockingStrengthShare, accountID, target.TargetId); err != nil {
|
||||
if sErr, ok := status.FromError(err); ok && sErr.Type() == status.NotFound {
|
||||
return status.Errorf(status.InvalidArgument, "resource target %q not found in account", target.TargetId)
|
||||
@@ -399,7 +427,7 @@ func validateTargetReferences(ctx context.Context, transaction store.Store, acco
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *managerImpl) DeleteService(ctx context.Context, accountID, userID, serviceID string) error {
|
||||
func (m *Manager) DeleteService(ctx context.Context, accountID, userID, serviceID string) error {
|
||||
ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Services, operations.Delete)
|
||||
if err != nil {
|
||||
return status.NewPermissionValidationError(err)
|
||||
@@ -408,14 +436,18 @@ func (m *managerImpl) DeleteService(ctx context.Context, accountID, userID, serv
|
||||
return status.NewPermissionDeniedError()
|
||||
}
|
||||
|
||||
var service *reverseproxy.Service
|
||||
var s *service.Service
|
||||
err = m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error {
|
||||
var err error
|
||||
service, err = transaction.GetServiceByID(ctx, store.LockingStrengthUpdate, accountID, serviceID)
|
||||
s, err = transaction.GetServiceByID(ctx, store.LockingStrengthUpdate, accountID, serviceID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = transaction.DeleteServiceTargets(ctx, accountID, serviceID); err != nil {
|
||||
return fmt.Errorf("failed to delete targets: %w", err)
|
||||
}
|
||||
|
||||
if err = transaction.DeleteService(ctx, accountID, serviceID); err != nil {
|
||||
return fmt.Errorf("failed to delete service: %w", err)
|
||||
}
|
||||
@@ -426,20 +458,16 @@ func (m *managerImpl) DeleteService(ctx context.Context, accountID, userID, serv
|
||||
return err
|
||||
}
|
||||
|
||||
if service.Source == reverseproxy.SourceEphemeral {
|
||||
m.exposeTracker.UntrackExpose(service.SourcePeer, service.Domain)
|
||||
}
|
||||
m.accountManager.StoreEvent(ctx, userID, serviceID, accountID, activity.ServiceDeleted, s.EventMeta())
|
||||
|
||||
m.accountManager.StoreEvent(ctx, userID, serviceID, accountID, activity.ServiceDeleted, service.EventMeta())
|
||||
|
||||
m.sendServiceUpdate(service, reverseproxy.Delete, service.ProxyCluster, "")
|
||||
m.proxyController.SendServiceUpdateToCluster(ctx, accountID, s.ToProtoMapping(service.Delete, "", m.proxyController.GetOIDCValidationConfig()), s.ProxyCluster)
|
||||
|
||||
m.accountManager.UpdateAccountPeers(ctx, accountID)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *managerImpl) DeleteAllServices(ctx context.Context, accountID, userID string) error {
|
||||
func (m *Manager) DeleteAllServices(ctx context.Context, accountID, userID string) error {
|
||||
ok, err := m.permissionsManager.ValidateUserPermissions(ctx, accountID, userID, modules.Services, operations.Delete)
|
||||
if err != nil {
|
||||
return status.NewPermissionValidationError(err)
|
||||
@@ -448,16 +476,16 @@ func (m *managerImpl) DeleteAllServices(ctx context.Context, accountID, userID s
|
||||
return status.NewPermissionDeniedError()
|
||||
}
|
||||
|
||||
var services []*reverseproxy.Service
|
||||
var services []*service.Service
|
||||
err = m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error {
|
||||
var err error
|
||||
services, err = transaction.GetServicesByAccountID(ctx, store.LockingStrengthUpdate, accountID)
|
||||
services, err = transaction.GetAccountServices(ctx, store.LockingStrengthUpdate, accountID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, service := range services {
|
||||
if err = transaction.DeleteService(ctx, accountID, service.ID); err != nil {
|
||||
for _, svc := range services {
|
||||
if err = transaction.DeleteService(ctx, accountID, svc.ID); err != nil {
|
||||
return fmt.Errorf("failed to delete service: %w", err)
|
||||
}
|
||||
}
|
||||
@@ -468,20 +496,11 @@ func (m *managerImpl) DeleteAllServices(ctx context.Context, accountID, userID s
|
||||
return err
|
||||
}
|
||||
|
||||
clusterMappings := make(map[string][]*proto.ProxyMapping)
|
||||
oidcCfg := m.proxyGRPCServer.GetOIDCValidationConfig()
|
||||
oidcCfg := m.proxyController.GetOIDCValidationConfig()
|
||||
|
||||
for _, service := range services {
|
||||
if service.Source == reverseproxy.SourceEphemeral {
|
||||
m.exposeTracker.UntrackExpose(service.SourcePeer, service.Domain)
|
||||
}
|
||||
m.accountManager.StoreEvent(ctx, userID, service.ID, accountID, activity.ServiceDeleted, service.EventMeta())
|
||||
mapping := service.ToProtoMapping(reverseproxy.Delete, "", oidcCfg)
|
||||
clusterMappings[service.ProxyCluster] = append(clusterMappings[service.ProxyCluster], mapping)
|
||||
}
|
||||
|
||||
for cluster, mappings := range clusterMappings {
|
||||
m.sendMappingsToCluster(mappings, cluster)
|
||||
for _, svc := range services {
|
||||
m.accountManager.StoreEvent(ctx, userID, svc.ID, accountID, activity.ServiceDeleted, svc.EventMeta())
|
||||
m.proxyController.SendServiceUpdateToCluster(ctx, accountID, svc.ToProtoMapping(service.Delete, "", oidcCfg), svc.ProxyCluster)
|
||||
}
|
||||
|
||||
m.accountManager.UpdateAccountPeers(ctx, accountID)
|
||||
@@ -491,7 +510,7 @@ func (m *managerImpl) DeleteAllServices(ctx context.Context, accountID, userID s
|
||||
|
||||
// SetCertificateIssuedAt sets the certificate issued timestamp to the current time.
|
||||
// Call this when receiving a gRPC notification that the certificate was issued.
|
||||
func (m *managerImpl) SetCertificateIssuedAt(ctx context.Context, accountID, serviceID string) error {
|
||||
func (m *Manager) SetCertificateIssuedAt(ctx context.Context, accountID, serviceID string) error {
|
||||
return m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error {
|
||||
service, err := transaction.GetServiceByID(ctx, store.LockingStrengthUpdate, accountID, serviceID)
|
||||
if err != nil {
|
||||
@@ -510,7 +529,7 @@ func (m *managerImpl) SetCertificateIssuedAt(ctx context.Context, accountID, ser
|
||||
}
|
||||
|
||||
// SetStatus updates the status of the service (e.g., "active", "tunnel_not_created", etc.)
|
||||
func (m *managerImpl) SetStatus(ctx context.Context, accountID, serviceID string, status reverseproxy.ProxyStatus) error {
|
||||
func (m *Manager) SetStatus(ctx context.Context, accountID, serviceID string, status service.Status) error {
|
||||
return m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error {
|
||||
service, err := transaction.GetServiceByID(ctx, store.LockingStrengthUpdate, accountID, serviceID)
|
||||
if err != nil {
|
||||
@@ -527,50 +546,42 @@ func (m *managerImpl) SetStatus(ctx context.Context, accountID, serviceID string
|
||||
})
|
||||
}
|
||||
|
||||
func (m *managerImpl) ReloadService(ctx context.Context, accountID, serviceID string) error {
|
||||
service, err := m.store.GetServiceByID(ctx, store.LockingStrengthNone, accountID, serviceID)
|
||||
func (m *Manager) ReloadService(ctx context.Context, accountID, serviceID string) error {
|
||||
s, err := m.store.GetServiceByID(ctx, store.LockingStrengthNone, accountID, serviceID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get service: %w", err)
|
||||
}
|
||||
|
||||
err = m.replaceHostByLookup(ctx, accountID, service)
|
||||
err = m.replaceHostByLookup(ctx, accountID, s)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to replace host by lookup for service %s: %w", service.ID, err)
|
||||
return fmt.Errorf("failed to replace host by lookup for service %s: %w", s.ID, err)
|
||||
}
|
||||
|
||||
m.sendServiceUpdate(service, reverseproxy.Update, service.ProxyCluster, "")
|
||||
m.proxyController.SendServiceUpdateToCluster(ctx, accountID, s.ToProtoMapping(service.Update, "", m.proxyController.GetOIDCValidationConfig()), s.ProxyCluster)
|
||||
|
||||
m.accountManager.UpdateAccountPeers(ctx, accountID)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *managerImpl) ReloadAllServicesForAccount(ctx context.Context, accountID string) error {
|
||||
func (m *Manager) ReloadAllServicesForAccount(ctx context.Context, accountID string) error {
|
||||
services, err := m.store.GetAccountServices(ctx, store.LockingStrengthNone, accountID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get services: %w", err)
|
||||
}
|
||||
|
||||
clusterMappings := make(map[string][]*proto.ProxyMapping)
|
||||
oidcCfg := m.proxyGRPCServer.GetOIDCValidationConfig()
|
||||
|
||||
for _, service := range services {
|
||||
err = m.replaceHostByLookup(ctx, accountID, service)
|
||||
for _, s := range services {
|
||||
err = m.replaceHostByLookup(ctx, accountID, s)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to replace host by lookup for service %s: %w", service.ID, err)
|
||||
return fmt.Errorf("failed to replace host by lookup for service %s: %w", s.ID, err)
|
||||
}
|
||||
mapping := service.ToProtoMapping(reverseproxy.Update, "", oidcCfg)
|
||||
clusterMappings[service.ProxyCluster] = append(clusterMappings[service.ProxyCluster], mapping)
|
||||
}
|
||||
|
||||
for cluster, mappings := range clusterMappings {
|
||||
m.sendMappingsToCluster(mappings, cluster)
|
||||
m.proxyController.SendServiceUpdateToCluster(ctx, accountID, s.ToProtoMapping(service.Update, "", m.proxyController.GetOIDCValidationConfig()), s.ProxyCluster)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *managerImpl) GetGlobalServices(ctx context.Context) ([]*reverseproxy.Service, error) {
|
||||
func (m *Manager) GetGlobalServices(ctx context.Context) ([]*service.Service, error) {
|
||||
services, err := m.store.GetServices(ctx, store.LockingStrengthNone)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get services: %w", err)
|
||||
@@ -586,7 +597,7 @@ func (m *managerImpl) GetGlobalServices(ctx context.Context) ([]*reverseproxy.Se
|
||||
return services, nil
|
||||
}
|
||||
|
||||
func (m *managerImpl) GetServiceByID(ctx context.Context, accountID, serviceID string) (*reverseproxy.Service, error) {
|
||||
func (m *Manager) GetServiceByID(ctx context.Context, accountID, serviceID string) (*service.Service, error) {
|
||||
service, err := m.store.GetServiceByID(ctx, store.LockingStrengthNone, accountID, serviceID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get service: %w", err)
|
||||
@@ -600,7 +611,7 @@ func (m *managerImpl) GetServiceByID(ctx context.Context, accountID, serviceID s
|
||||
return service, nil
|
||||
}
|
||||
|
||||
func (m *managerImpl) GetAccountServices(ctx context.Context, accountID string) ([]*reverseproxy.Service, error) {
|
||||
func (m *Manager) GetAccountServices(ctx context.Context, accountID string) ([]*service.Service, error) {
|
||||
services, err := m.store.GetAccountServices(ctx, store.LockingStrengthNone, accountID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get services: %w", err)
|
||||
@@ -616,7 +627,7 @@ func (m *managerImpl) GetAccountServices(ctx context.Context, accountID string)
|
||||
return services, nil
|
||||
}
|
||||
|
||||
func (m *managerImpl) GetServiceIDByTargetID(ctx context.Context, accountID string, resourceID string) (string, error) {
|
||||
func (m *Manager) GetServiceIDByTargetID(ctx context.Context, accountID string, resourceID string) (string, error) {
|
||||
target, err := m.store.GetServiceTargetByTargetID(ctx, store.LockingStrengthNone, accountID, resourceID)
|
||||
if err != nil {
|
||||
if s, ok := status.FromError(err); ok && s.Type() == status.NotFound {
|
||||
@@ -634,7 +645,7 @@ func (m *managerImpl) GetServiceIDByTargetID(ctx context.Context, accountID stri
|
||||
|
||||
// validateExposePermission checks whether the peer is allowed to use the expose feature.
|
||||
// It verifies the account has peer expose enabled and that the peer belongs to an allowed group.
|
||||
func (m *managerImpl) validateExposePermission(ctx context.Context, accountID, peerID string) error {
|
||||
func (m *Manager) validateExposePermission(ctx context.Context, accountID, peerID string) error {
|
||||
settings, err := m.store.GetAccountSettings(ctx, store.LockingStrengthNone, accountID)
|
||||
if err != nil {
|
||||
log.WithContext(ctx).Errorf("failed to get account settings: %v", err)
|
||||
@@ -667,7 +678,7 @@ func (m *managerImpl) validateExposePermission(ctx context.Context, accountID, p
|
||||
// CreateServiceFromPeer creates a service initiated by a peer expose request.
|
||||
// It validates the request, checks expose permissions, enforces the per-peer limit,
|
||||
// creates the service, and tracks it for TTL-based reaping.
|
||||
func (m *managerImpl) CreateServiceFromPeer(ctx context.Context, accountID, peerID string, req *reverseproxy.ExposeServiceRequest) (*reverseproxy.ExposeServiceResponse, error) {
|
||||
func (m *Manager) CreateServiceFromPeer(ctx context.Context, accountID, peerID string, req *service.ExposeServiceRequest) (*service.ExposeServiceResponse, error) {
|
||||
if err := req.Validate(); err != nil {
|
||||
return nil, status.Errorf(status.InvalidArgument, "validate expose request: %v", err)
|
||||
}
|
||||
@@ -676,31 +687,31 @@ func (m *managerImpl) CreateServiceFromPeer(ctx context.Context, accountID, peer
|
||||
return nil, err
|
||||
}
|
||||
|
||||
serviceName, err := reverseproxy.GenerateExposeName(req.NamePrefix)
|
||||
serviceName, err := service.GenerateExposeName(req.NamePrefix)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(status.InvalidArgument, "generate service name: %v", err)
|
||||
}
|
||||
|
||||
service := req.ToService(accountID, peerID, serviceName)
|
||||
service.Source = reverseproxy.SourceEphemeral
|
||||
svc := req.ToService(accountID, peerID, serviceName)
|
||||
svc.Source = service.SourceEphemeral
|
||||
|
||||
if service.Domain == "" {
|
||||
domain, err := m.buildRandomDomain(service.Name)
|
||||
if svc.Domain == "" {
|
||||
domain, err := m.buildRandomDomain(svc.Name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("build random domain for service %s: %w", service.Name, err)
|
||||
return nil, fmt.Errorf("build random domain for service %s: %w", svc.Name, err)
|
||||
}
|
||||
service.Domain = domain
|
||||
svc.Domain = domain
|
||||
}
|
||||
|
||||
if service.Auth.BearerAuth != nil && service.Auth.BearerAuth.Enabled {
|
||||
groupIDs, err := m.getGroupIDsFromNames(ctx, accountID, service.Auth.BearerAuth.DistributionGroups)
|
||||
if svc.Auth.BearerAuth != nil && svc.Auth.BearerAuth.Enabled {
|
||||
groupIDs, err := m.getGroupIDsFromNames(ctx, accountID, svc.Auth.BearerAuth.DistributionGroups)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("get group ids for service %s: %w", service.Name, err)
|
||||
return nil, fmt.Errorf("get group ids for service %s: %w", svc.Name, err)
|
||||
}
|
||||
service.Auth.BearerAuth.DistributionGroups = groupIDs
|
||||
svc.Auth.BearerAuth.DistributionGroups = groupIDs
|
||||
}
|
||||
|
||||
if err := m.initializeServiceForCreate(ctx, accountID, service); err != nil {
|
||||
if err := m.initializeServiceForCreate(ctx, accountID, svc); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -709,46 +720,33 @@ func (m *managerImpl) CreateServiceFromPeer(ctx context.Context, accountID, peer
|
||||
return nil, err
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
service.Meta.LastRenewedAt = &now
|
||||
service.SourcePeer = peerID
|
||||
svc.SourcePeer = peerID
|
||||
|
||||
if err := m.persistNewService(ctx, accountID, service); err != nil {
|
||||
now := time.Now()
|
||||
svc.Meta.LastRenewedAt = &now
|
||||
|
||||
if err := m.persistNewEphemeralService(ctx, accountID, peerID, svc); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
alreadyTracked, allowed := m.exposeTracker.TrackExposeIfAllowed(peerID, service.Domain, accountID)
|
||||
if alreadyTracked {
|
||||
if err := m.deleteServiceFromPeer(ctx, accountID, peerID, service.Domain, false); err != nil {
|
||||
log.WithContext(ctx).Debugf("failed to delete duplicate expose service for domain %s: %v", service.Domain, err)
|
||||
}
|
||||
return nil, status.Errorf(status.AlreadyExists, "peer already has an active expose session for this domain")
|
||||
}
|
||||
if !allowed {
|
||||
if err := m.deleteServiceFromPeer(ctx, accountID, peerID, service.Domain, false); err != nil {
|
||||
log.WithContext(ctx).Debugf("failed to delete service after limit exceeded for domain %s: %v", service.Domain, err)
|
||||
}
|
||||
return nil, status.Errorf(status.PreconditionFailed, "peer has reached the maximum number of active expose sessions (%d)", maxExposesPerPeer)
|
||||
meta := addPeerInfoToEventMeta(svc.EventMeta(), peer)
|
||||
m.accountManager.StoreEvent(ctx, peerID, svc.ID, accountID, activity.PeerServiceExposed, meta)
|
||||
|
||||
if err := m.replaceHostByLookup(ctx, accountID, svc); err != nil {
|
||||
return nil, fmt.Errorf("replace host by lookup for service %s: %w", svc.ID, err)
|
||||
}
|
||||
|
||||
meta := addPeerInfoToEventMeta(service.EventMeta(), peer)
|
||||
m.accountManager.StoreEvent(ctx, peerID, service.ID, accountID, activity.PeerServiceExposed, meta)
|
||||
|
||||
if err := m.replaceHostByLookup(ctx, accountID, service); err != nil {
|
||||
return nil, fmt.Errorf("replace host by lookup for service %s: %w", service.ID, err)
|
||||
}
|
||||
|
||||
m.sendServiceUpdate(service, reverseproxy.Create, service.ProxyCluster, "")
|
||||
m.proxyController.SendServiceUpdateToCluster(ctx, accountID, svc.ToProtoMapping(service.Create, "", m.proxyController.GetOIDCValidationConfig()), svc.ProxyCluster)
|
||||
m.accountManager.UpdateAccountPeers(ctx, accountID)
|
||||
|
||||
return &reverseproxy.ExposeServiceResponse{
|
||||
ServiceName: service.Name,
|
||||
ServiceURL: "https://" + service.Domain,
|
||||
Domain: service.Domain,
|
||||
return &service.ExposeServiceResponse{
|
||||
ServiceName: svc.Name,
|
||||
ServiceURL: "https://" + svc.Domain,
|
||||
Domain: svc.Domain,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (m *managerImpl) getGroupIDsFromNames(ctx context.Context, accountID string, groupNames []string) ([]string, error) {
|
||||
func (m *Manager) getGroupIDsFromNames(ctx context.Context, accountID string, groupNames []string) ([]string, error) {
|
||||
if len(groupNames) == 0 {
|
||||
return []string{}, fmt.Errorf("no group names provided")
|
||||
}
|
||||
@@ -763,7 +761,7 @@ func (m *managerImpl) getGroupIDsFromNames(ctx context.Context, accountID string
|
||||
return groupIDs, nil
|
||||
}
|
||||
|
||||
func (m *managerImpl) buildRandomDomain(name string) (string, error) {
|
||||
func (m *Manager) buildRandomDomain(name string) (string, error) {
|
||||
if m.clusterDeriver == nil {
|
||||
return "", fmt.Errorf("unable to get random domain")
|
||||
}
|
||||
@@ -776,33 +774,24 @@ func (m *managerImpl) buildRandomDomain(name string) (string, error) {
|
||||
return domain, nil
|
||||
}
|
||||
|
||||
// RenewServiceFromPeer renews the in-memory TTL tracker for the peer's expose session.
|
||||
// Returns an error if the expose is not actively tracked.
|
||||
func (m *managerImpl) RenewServiceFromPeer(_ context.Context, _, peerID, domain string) error {
|
||||
if !m.exposeTracker.RenewTrackedExpose(peerID, domain) {
|
||||
return status.Errorf(status.NotFound, "no active expose session for domain %s", domain)
|
||||
}
|
||||
return nil
|
||||
// RenewServiceFromPeer updates the DB timestamp for the peer's ephemeral service.
|
||||
func (m *Manager) RenewServiceFromPeer(ctx context.Context, accountID, peerID, domain string) error {
|
||||
return m.store.RenewEphemeralService(ctx, accountID, peerID, domain)
|
||||
}
|
||||
|
||||
// StopServiceFromPeer stops a peer's active expose session by untracking and deleting the service.
|
||||
func (m *managerImpl) StopServiceFromPeer(ctx context.Context, accountID, peerID, domain string) error {
|
||||
// StopServiceFromPeer stops a peer's active expose session by deleting the service from the DB.
|
||||
func (m *Manager) StopServiceFromPeer(ctx context.Context, accountID, peerID, domain string) error {
|
||||
if err := m.deleteServiceFromPeer(ctx, accountID, peerID, domain, false); err != nil {
|
||||
log.WithContext(ctx).Errorf("failed to delete peer-exposed service for domain %s: %v", domain, err)
|
||||
return err
|
||||
}
|
||||
|
||||
if !m.exposeTracker.StopTrackedExpose(peerID, domain) {
|
||||
log.WithContext(ctx).Warnf("expose tracker entry for domain %s already removed; service was deleted", domain)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// deleteServiceFromPeer deletes a peer-initiated service identified by domain.
|
||||
// When expired is true, the activity is recorded as PeerServiceExposeExpired instead of PeerServiceUnexposed.
|
||||
func (m *managerImpl) deleteServiceFromPeer(ctx context.Context, accountID, peerID, domain string, expired bool) error {
|
||||
service, err := m.lookupPeerService(ctx, accountID, peerID, domain)
|
||||
func (m *Manager) deleteServiceFromPeer(ctx context.Context, accountID, peerID, domain string, expired bool) error {
|
||||
svc, err := m.lookupPeerService(ctx, accountID, peerID, domain)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -811,41 +800,41 @@ func (m *managerImpl) deleteServiceFromPeer(ctx context.Context, accountID, peer
|
||||
if expired {
|
||||
activityCode = activity.PeerServiceExposeExpired
|
||||
}
|
||||
return m.deletePeerService(ctx, accountID, peerID, service.ID, activityCode)
|
||||
return m.deletePeerService(ctx, accountID, peerID, svc.ID, activityCode)
|
||||
}
|
||||
|
||||
// lookupPeerService finds a peer-initiated service by domain and validates ownership.
|
||||
func (m *managerImpl) lookupPeerService(ctx context.Context, accountID, peerID, domain string) (*reverseproxy.Service, error) {
|
||||
service, err := m.store.GetServiceByDomain(ctx, accountID, domain)
|
||||
func (m *Manager) lookupPeerService(ctx context.Context, accountID, peerID, domain string) (*service.Service, error) {
|
||||
svc, err := m.store.GetServiceByDomain(ctx, domain)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if service.Source != reverseproxy.SourceEphemeral {
|
||||
if svc.Source != service.SourceEphemeral {
|
||||
return nil, status.Errorf(status.PermissionDenied, "cannot operate on API-created service via peer expose")
|
||||
}
|
||||
|
||||
if service.SourcePeer != peerID {
|
||||
if svc.SourcePeer != peerID {
|
||||
return nil, status.Errorf(status.PermissionDenied, "cannot operate on service exposed by another peer")
|
||||
}
|
||||
|
||||
return service, nil
|
||||
return svc, nil
|
||||
}
|
||||
|
||||
func (m *managerImpl) deletePeerService(ctx context.Context, accountID, peerID, serviceID string, activityCode activity.Activity) error {
|
||||
var service *reverseproxy.Service
|
||||
func (m *Manager) deletePeerService(ctx context.Context, accountID, peerID, serviceID string, activityCode activity.Activity) error {
|
||||
var svc *service.Service
|
||||
err := m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error {
|
||||
var err error
|
||||
service, err = transaction.GetServiceByID(ctx, store.LockingStrengthUpdate, accountID, serviceID)
|
||||
svc, err = transaction.GetServiceByID(ctx, store.LockingStrengthUpdate, accountID, serviceID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if service.Source != reverseproxy.SourceEphemeral {
|
||||
if svc.Source != service.SourceEphemeral {
|
||||
return status.Errorf(status.PermissionDenied, "cannot delete API-created service via peer expose")
|
||||
}
|
||||
|
||||
if service.SourcePeer != peerID {
|
||||
if svc.SourcePeer != peerID {
|
||||
return status.Errorf(status.PermissionDenied, "cannot delete service exposed by another peer")
|
||||
}
|
||||
|
||||
@@ -865,17 +854,68 @@ func (m *managerImpl) deletePeerService(ctx context.Context, accountID, peerID,
|
||||
peer = nil
|
||||
}
|
||||
|
||||
meta := addPeerInfoToEventMeta(service.EventMeta(), peer)
|
||||
meta := addPeerInfoToEventMeta(svc.EventMeta(), peer)
|
||||
|
||||
m.accountManager.StoreEvent(ctx, peerID, serviceID, accountID, activityCode, meta)
|
||||
|
||||
m.sendServiceUpdate(service, reverseproxy.Delete, service.ProxyCluster, "")
|
||||
m.proxyController.SendServiceUpdateToCluster(ctx, accountID, svc.ToProtoMapping(service.Delete, "", m.proxyController.GetOIDCValidationConfig()), svc.ProxyCluster)
|
||||
|
||||
m.accountManager.UpdateAccountPeers(ctx, accountID)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// deleteExpiredPeerService deletes an ephemeral service by ID after re-checking
|
||||
// that it is still expired under a row lock. This prevents deleting a service
|
||||
// that was renewed between the batch query and this delete, and ensures only one
|
||||
// management instance processes the deletion
|
||||
func (m *Manager) deleteExpiredPeerService(ctx context.Context, accountID, peerID, serviceID string) error {
|
||||
var svc *service.Service
|
||||
deleted := false
|
||||
err := m.store.ExecuteInTransaction(ctx, func(transaction store.Store) error {
|
||||
var err error
|
||||
svc, err = transaction.GetServiceByID(ctx, store.LockingStrengthUpdate, accountID, serviceID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if svc.Source != service.SourceEphemeral || svc.SourcePeer != peerID {
|
||||
return status.Errorf(status.PermissionDenied, "service does not match expected ephemeral owner")
|
||||
}
|
||||
|
||||
if svc.Meta.LastRenewedAt != nil && time.Since(*svc.Meta.LastRenewedAt) <= exposeTTL {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err = transaction.DeleteService(ctx, accountID, serviceID); err != nil {
|
||||
return fmt.Errorf("delete service: %w", err)
|
||||
}
|
||||
deleted = true
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !deleted {
|
||||
return nil
|
||||
}
|
||||
|
||||
peer, err := m.store.GetPeerByID(ctx, store.LockingStrengthNone, accountID, peerID)
|
||||
if err != nil {
|
||||
log.WithContext(ctx).Debugf("failed to get peer %s for event metadata: %v", peerID, err)
|
||||
peer = nil
|
||||
}
|
||||
|
||||
meta := addPeerInfoToEventMeta(svc.EventMeta(), peer)
|
||||
m.accountManager.StoreEvent(ctx, peerID, serviceID, accountID, activity.PeerServiceExposeExpired, meta)
|
||||
m.proxyController.SendServiceUpdateToCluster(ctx, accountID, svc.ToProtoMapping(service.Delete, "", m.proxyController.GetOIDCValidationConfig()), svc.ProxyCluster)
|
||||
m.accountManager.UpdateAccountPeers(ctx, accountID)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func addPeerInfoToEventMeta(meta map[string]any, peer *nbpeer.Peer) map[string]any {
|
||||
if peer == nil {
|
||||
return meta
|
||||
@@ -10,18 +10,21 @@ import (
|
||||
"github.com/golang/mock/gomock"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.opentelemetry.io/otel/metric/noop"
|
||||
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy"
|
||||
proxymanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy/manager"
|
||||
rpservice "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service"
|
||||
nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc"
|
||||
"github.com/netbirdio/netbird/management/server/account"
|
||||
"github.com/netbirdio/netbird/management/server/activity"
|
||||
"github.com/netbirdio/netbird/management/server/integrations/extra_settings"
|
||||
"github.com/netbirdio/netbird/management/server/mock_server"
|
||||
nbpeer "github.com/netbirdio/netbird/management/server/peer"
|
||||
"github.com/netbirdio/netbird/management/server/permissions"
|
||||
"github.com/netbirdio/netbird/management/server/settings"
|
||||
"github.com/netbirdio/netbird/management/server/permissions/modules"
|
||||
"github.com/netbirdio/netbird/management/server/permissions/operations"
|
||||
"github.com/netbirdio/netbird/management/server/store"
|
||||
"github.com/netbirdio/netbird/management/server/types"
|
||||
"github.com/netbirdio/netbird/management/server/users"
|
||||
"github.com/netbirdio/netbird/shared/management/status"
|
||||
)
|
||||
|
||||
@@ -30,13 +33,13 @@ func TestInitializeServiceForCreate(t *testing.T) {
|
||||
accountID := "test-account"
|
||||
|
||||
t.Run("successful initialization without cluster deriver", func(t *testing.T) {
|
||||
mgr := &managerImpl{
|
||||
mgr := &Manager{
|
||||
clusterDeriver: nil,
|
||||
}
|
||||
|
||||
service := &reverseproxy.Service{
|
||||
service := &rpservice.Service{
|
||||
Domain: "example.com",
|
||||
Auth: reverseproxy.AuthConfig{},
|
||||
Auth: rpservice.AuthConfig{},
|
||||
}
|
||||
|
||||
err := mgr.initializeServiceForCreate(ctx, accountID, service)
|
||||
@@ -50,12 +53,12 @@ func TestInitializeServiceForCreate(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("verifies session keys are different", func(t *testing.T) {
|
||||
mgr := &managerImpl{
|
||||
mgr := &Manager{
|
||||
clusterDeriver: nil,
|
||||
}
|
||||
|
||||
service1 := &reverseproxy.Service{Domain: "test1.com", Auth: reverseproxy.AuthConfig{}}
|
||||
service2 := &reverseproxy.Service{Domain: "test2.com", Auth: reverseproxy.AuthConfig{}}
|
||||
service1 := &rpservice.Service{Domain: "test1.com", Auth: rpservice.AuthConfig{}}
|
||||
service2 := &rpservice.Service{Domain: "test2.com", Auth: rpservice.AuthConfig{}}
|
||||
|
||||
err1 := mgr.initializeServiceForCreate(ctx, accountID, service1)
|
||||
err2 := mgr.initializeServiceForCreate(ctx, accountID, service2)
|
||||
@@ -69,7 +72,6 @@ func TestInitializeServiceForCreate(t *testing.T) {
|
||||
|
||||
func TestCheckDomainAvailable(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
accountID := "test-account"
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -85,7 +87,7 @@ func TestCheckDomainAvailable(t *testing.T) {
|
||||
excludeServiceID: "",
|
||||
setupMock: func(ms *store.MockStore) {
|
||||
ms.EXPECT().
|
||||
GetServiceByDomain(ctx, accountID, "available.com").
|
||||
GetServiceByDomain(ctx, "available.com").
|
||||
Return(nil, status.Errorf(status.NotFound, "not found"))
|
||||
},
|
||||
expectedError: false,
|
||||
@@ -96,8 +98,8 @@ func TestCheckDomainAvailable(t *testing.T) {
|
||||
excludeServiceID: "",
|
||||
setupMock: func(ms *store.MockStore) {
|
||||
ms.EXPECT().
|
||||
GetServiceByDomain(ctx, accountID, "exists.com").
|
||||
Return(&reverseproxy.Service{ID: "existing-id", Domain: "exists.com"}, nil)
|
||||
GetServiceByDomain(ctx, "exists.com").
|
||||
Return(&rpservice.Service{ID: "existing-id", Domain: "exists.com"}, nil)
|
||||
},
|
||||
expectedError: true,
|
||||
errorType: status.AlreadyExists,
|
||||
@@ -108,8 +110,8 @@ func TestCheckDomainAvailable(t *testing.T) {
|
||||
excludeServiceID: "service-123",
|
||||
setupMock: func(ms *store.MockStore) {
|
||||
ms.EXPECT().
|
||||
GetServiceByDomain(ctx, accountID, "exists.com").
|
||||
Return(&reverseproxy.Service{ID: "service-123", Domain: "exists.com"}, nil)
|
||||
GetServiceByDomain(ctx, "exists.com").
|
||||
Return(&rpservice.Service{ID: "service-123", Domain: "exists.com"}, nil)
|
||||
},
|
||||
expectedError: false,
|
||||
},
|
||||
@@ -119,8 +121,8 @@ func TestCheckDomainAvailable(t *testing.T) {
|
||||
excludeServiceID: "service-456",
|
||||
setupMock: func(ms *store.MockStore) {
|
||||
ms.EXPECT().
|
||||
GetServiceByDomain(ctx, accountID, "exists.com").
|
||||
Return(&reverseproxy.Service{ID: "service-123", Domain: "exists.com"}, nil)
|
||||
GetServiceByDomain(ctx, "exists.com").
|
||||
Return(&rpservice.Service{ID: "service-123", Domain: "exists.com"}, nil)
|
||||
},
|
||||
expectedError: true,
|
||||
errorType: status.AlreadyExists,
|
||||
@@ -131,7 +133,7 @@ func TestCheckDomainAvailable(t *testing.T) {
|
||||
excludeServiceID: "",
|
||||
setupMock: func(ms *store.MockStore) {
|
||||
ms.EXPECT().
|
||||
GetServiceByDomain(ctx, accountID, "error.com").
|
||||
GetServiceByDomain(ctx, "error.com").
|
||||
Return(nil, errors.New("database error"))
|
||||
},
|
||||
expectedError: true,
|
||||
@@ -146,8 +148,8 @@ func TestCheckDomainAvailable(t *testing.T) {
|
||||
mockStore := store.NewMockStore(ctrl)
|
||||
tt.setupMock(mockStore)
|
||||
|
||||
mgr := &managerImpl{}
|
||||
err := mgr.checkDomainAvailable(ctx, mockStore, accountID, tt.domain, tt.excludeServiceID)
|
||||
mgr := &Manager{}
|
||||
err := mgr.checkDomainAvailable(ctx, mockStore, tt.domain, tt.excludeServiceID)
|
||||
|
||||
if tt.expectedError {
|
||||
require.Error(t, err)
|
||||
@@ -165,7 +167,6 @@ func TestCheckDomainAvailable(t *testing.T) {
|
||||
|
||||
func TestCheckDomainAvailable_EdgeCases(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
accountID := "test-account"
|
||||
|
||||
t.Run("empty domain", func(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
@@ -173,11 +174,11 @@ func TestCheckDomainAvailable_EdgeCases(t *testing.T) {
|
||||
|
||||
mockStore := store.NewMockStore(ctrl)
|
||||
mockStore.EXPECT().
|
||||
GetServiceByDomain(ctx, accountID, "").
|
||||
GetServiceByDomain(ctx, "").
|
||||
Return(nil, status.Errorf(status.NotFound, "not found"))
|
||||
|
||||
mgr := &managerImpl{}
|
||||
err := mgr.checkDomainAvailable(ctx, mockStore, accountID, "", "")
|
||||
mgr := &Manager{}
|
||||
err := mgr.checkDomainAvailable(ctx, mockStore, "", "")
|
||||
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
@@ -188,11 +189,11 @@ func TestCheckDomainAvailable_EdgeCases(t *testing.T) {
|
||||
|
||||
mockStore := store.NewMockStore(ctrl)
|
||||
mockStore.EXPECT().
|
||||
GetServiceByDomain(ctx, accountID, "test.com").
|
||||
Return(&reverseproxy.Service{ID: "some-id", Domain: "test.com"}, nil)
|
||||
GetServiceByDomain(ctx, "test.com").
|
||||
Return(&rpservice.Service{ID: "some-id", Domain: "test.com"}, nil)
|
||||
|
||||
mgr := &managerImpl{}
|
||||
err := mgr.checkDomainAvailable(ctx, mockStore, accountID, "test.com", "")
|
||||
mgr := &Manager{}
|
||||
err := mgr.checkDomainAvailable(ctx, mockStore, "test.com", "")
|
||||
|
||||
assert.Error(t, err)
|
||||
sErr, ok := status.FromError(err)
|
||||
@@ -206,11 +207,11 @@ func TestCheckDomainAvailable_EdgeCases(t *testing.T) {
|
||||
|
||||
mockStore := store.NewMockStore(ctrl)
|
||||
mockStore.EXPECT().
|
||||
GetServiceByDomain(ctx, accountID, "nil.com").
|
||||
GetServiceByDomain(ctx, "nil.com").
|
||||
Return(nil, nil)
|
||||
|
||||
mgr := &managerImpl{}
|
||||
err := mgr.checkDomainAvailable(ctx, mockStore, accountID, "nil.com", "")
|
||||
mgr := &Manager{}
|
||||
err := mgr.checkDomainAvailable(ctx, mockStore, "nil.com", "")
|
||||
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
@@ -225,10 +226,10 @@ func TestPersistNewService(t *testing.T) {
|
||||
defer ctrl.Finish()
|
||||
|
||||
mockStore := store.NewMockStore(ctrl)
|
||||
service := &reverseproxy.Service{
|
||||
service := &rpservice.Service{
|
||||
ID: "service-123",
|
||||
Domain: "new.com",
|
||||
Targets: []*reverseproxy.Target{},
|
||||
Targets: []*rpservice.Target{},
|
||||
}
|
||||
|
||||
// Mock ExecuteInTransaction to execute the function immediately
|
||||
@@ -238,7 +239,7 @@ func TestPersistNewService(t *testing.T) {
|
||||
// Create another mock for the transaction
|
||||
txMock := store.NewMockStore(ctrl)
|
||||
txMock.EXPECT().
|
||||
GetServiceByDomain(ctx, accountID, "new.com").
|
||||
GetServiceByDomain(ctx, "new.com").
|
||||
Return(nil, status.Errorf(status.NotFound, "not found"))
|
||||
txMock.EXPECT().
|
||||
CreateService(ctx, service).
|
||||
@@ -247,7 +248,7 @@ func TestPersistNewService(t *testing.T) {
|
||||
return fn(txMock)
|
||||
})
|
||||
|
||||
mgr := &managerImpl{store: mockStore}
|
||||
mgr := &Manager{store: mockStore}
|
||||
err := mgr.persistNewService(ctx, accountID, service)
|
||||
|
||||
assert.NoError(t, err)
|
||||
@@ -258,10 +259,10 @@ func TestPersistNewService(t *testing.T) {
|
||||
defer ctrl.Finish()
|
||||
|
||||
mockStore := store.NewMockStore(ctrl)
|
||||
service := &reverseproxy.Service{
|
||||
service := &rpservice.Service{
|
||||
ID: "service-123",
|
||||
Domain: "existing.com",
|
||||
Targets: []*reverseproxy.Target{},
|
||||
Targets: []*rpservice.Target{},
|
||||
}
|
||||
|
||||
mockStore.EXPECT().
|
||||
@@ -269,13 +270,13 @@ func TestPersistNewService(t *testing.T) {
|
||||
DoAndReturn(func(ctx context.Context, fn func(store.Store) error) error {
|
||||
txMock := store.NewMockStore(ctrl)
|
||||
txMock.EXPECT().
|
||||
GetServiceByDomain(ctx, accountID, "existing.com").
|
||||
Return(&reverseproxy.Service{ID: "other-id", Domain: "existing.com"}, nil)
|
||||
GetServiceByDomain(ctx, "existing.com").
|
||||
Return(&rpservice.Service{ID: "other-id", Domain: "existing.com"}, nil)
|
||||
|
||||
return fn(txMock)
|
||||
})
|
||||
|
||||
mgr := &managerImpl{store: mockStore}
|
||||
mgr := &Manager{store: mockStore}
|
||||
err := mgr.persistNewService(ctx, accountID, service)
|
||||
|
||||
require.Error(t, err)
|
||||
@@ -285,21 +286,21 @@ func TestPersistNewService(t *testing.T) {
|
||||
})
|
||||
}
|
||||
func TestPreserveExistingAuthSecrets(t *testing.T) {
|
||||
mgr := &managerImpl{}
|
||||
mgr := &Manager{}
|
||||
|
||||
t.Run("preserve password when empty", func(t *testing.T) {
|
||||
existing := &reverseproxy.Service{
|
||||
Auth: reverseproxy.AuthConfig{
|
||||
PasswordAuth: &reverseproxy.PasswordAuthConfig{
|
||||
existing := &rpservice.Service{
|
||||
Auth: rpservice.AuthConfig{
|
||||
PasswordAuth: &rpservice.PasswordAuthConfig{
|
||||
Enabled: true,
|
||||
Password: "hashed-password",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
updated := &reverseproxy.Service{
|
||||
Auth: reverseproxy.AuthConfig{
|
||||
PasswordAuth: &reverseproxy.PasswordAuthConfig{
|
||||
updated := &rpservice.Service{
|
||||
Auth: rpservice.AuthConfig{
|
||||
PasswordAuth: &rpservice.PasswordAuthConfig{
|
||||
Enabled: true,
|
||||
Password: "",
|
||||
},
|
||||
@@ -312,18 +313,18 @@ func TestPreserveExistingAuthSecrets(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("preserve pin when empty", func(t *testing.T) {
|
||||
existing := &reverseproxy.Service{
|
||||
Auth: reverseproxy.AuthConfig{
|
||||
PinAuth: &reverseproxy.PINAuthConfig{
|
||||
existing := &rpservice.Service{
|
||||
Auth: rpservice.AuthConfig{
|
||||
PinAuth: &rpservice.PINAuthConfig{
|
||||
Enabled: true,
|
||||
Pin: "hashed-pin",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
updated := &reverseproxy.Service{
|
||||
Auth: reverseproxy.AuthConfig{
|
||||
PinAuth: &reverseproxy.PINAuthConfig{
|
||||
updated := &rpservice.Service{
|
||||
Auth: rpservice.AuthConfig{
|
||||
PinAuth: &rpservice.PINAuthConfig{
|
||||
Enabled: true,
|
||||
Pin: "",
|
||||
},
|
||||
@@ -336,18 +337,18 @@ func TestPreserveExistingAuthSecrets(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("do not preserve when password is provided", func(t *testing.T) {
|
||||
existing := &reverseproxy.Service{
|
||||
Auth: reverseproxy.AuthConfig{
|
||||
PasswordAuth: &reverseproxy.PasswordAuthConfig{
|
||||
existing := &rpservice.Service{
|
||||
Auth: rpservice.AuthConfig{
|
||||
PasswordAuth: &rpservice.PasswordAuthConfig{
|
||||
Enabled: true,
|
||||
Password: "old-password",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
updated := &reverseproxy.Service{
|
||||
Auth: reverseproxy.AuthConfig{
|
||||
PasswordAuth: &reverseproxy.PasswordAuthConfig{
|
||||
updated := &rpservice.Service{
|
||||
Auth: rpservice.AuthConfig{
|
||||
PasswordAuth: &rpservice.PasswordAuthConfig{
|
||||
Enabled: true,
|
||||
Password: "new-password",
|
||||
},
|
||||
@@ -362,10 +363,10 @@ func TestPreserveExistingAuthSecrets(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestPreserveServiceMetadata(t *testing.T) {
|
||||
mgr := &managerImpl{}
|
||||
mgr := &Manager{}
|
||||
|
||||
existing := &reverseproxy.Service{
|
||||
Meta: reverseproxy.ServiceMeta{
|
||||
existing := &rpservice.Service{
|
||||
Meta: rpservice.Meta{
|
||||
CertificateIssuedAt: func() *time.Time { t := time.Now(); return &t }(),
|
||||
Status: "active",
|
||||
},
|
||||
@@ -373,7 +374,7 @@ func TestPreserveServiceMetadata(t *testing.T) {
|
||||
SessionPublicKey: "public-key",
|
||||
}
|
||||
|
||||
updated := &reverseproxy.Service{
|
||||
updated := &rpservice.Service{
|
||||
Domain: "updated.com",
|
||||
}
|
||||
|
||||
@@ -397,32 +398,34 @@ func TestDeletePeerService_SourcePeerValidation(t *testing.T) {
|
||||
IP: net.ParseIP("100.64.0.1"),
|
||||
}
|
||||
|
||||
newEphemeralService := func() *reverseproxy.Service {
|
||||
return &reverseproxy.Service{
|
||||
newEphemeralService := func() *rpservice.Service {
|
||||
return &rpservice.Service{
|
||||
ID: serviceID,
|
||||
AccountID: accountID,
|
||||
Name: "test-service",
|
||||
Domain: "test.example.com",
|
||||
Source: reverseproxy.SourceEphemeral,
|
||||
Source: rpservice.SourceEphemeral,
|
||||
SourcePeer: ownerPeerID,
|
||||
}
|
||||
}
|
||||
|
||||
newPermanentService := func() *reverseproxy.Service {
|
||||
return &reverseproxy.Service{
|
||||
newPermanentService := func() *rpservice.Service {
|
||||
return &rpservice.Service{
|
||||
ID: serviceID,
|
||||
AccountID: accountID,
|
||||
Name: "api-service",
|
||||
Domain: "api.example.com",
|
||||
Source: reverseproxy.SourcePermanent,
|
||||
Source: rpservice.SourcePermanent,
|
||||
}
|
||||
}
|
||||
|
||||
newProxyServer := func(t *testing.T) *nbgrpc.ProxyServiceServer {
|
||||
t.Helper()
|
||||
tokenStore := nbgrpc.NewOneTimeTokenStore(1 * time.Hour)
|
||||
srv := nbgrpc.NewProxyServiceServer(nil, tokenStore, nbgrpc.ProxyOIDCConfig{}, nil, nil)
|
||||
t.Cleanup(srv.Close)
|
||||
tokenStore, err := nbgrpc.NewOneTimeTokenStore(context.Background(), 1*time.Hour, 10*time.Minute, 100)
|
||||
require.NoError(t, err)
|
||||
pkceStore, err := nbgrpc.NewPKCEVerifierStore(context.Background(), 10*time.Minute, 10*time.Minute, 100)
|
||||
require.NoError(t, err)
|
||||
srv := nbgrpc.NewProxyServiceServer(nil, tokenStore, pkceStore, nbgrpc.ProxyOIDCConfig{}, nil, nil, nil)
|
||||
return srv
|
||||
}
|
||||
|
||||
@@ -455,10 +458,14 @@ func TestDeletePeerService_SourcePeerValidation(t *testing.T) {
|
||||
GetPeerByID(ctx, store.LockingStrengthNone, accountID, ownerPeerID).
|
||||
Return(testPeer, nil)
|
||||
|
||||
mgr := &managerImpl{
|
||||
store: mockStore,
|
||||
accountManager: mockAccountMgr,
|
||||
proxyGRPCServer: newProxyServer(t),
|
||||
mgr := &Manager{
|
||||
store: mockStore,
|
||||
accountManager: mockAccountMgr,
|
||||
proxyController: func() proxy.Controller {
|
||||
c, err := proxymanager.NewGRPCController(newProxyServer(t), noop.NewMeterProvider().Meter(""))
|
||||
require.NoError(t, err)
|
||||
return c
|
||||
}(),
|
||||
}
|
||||
|
||||
err := mgr.deletePeerService(ctx, accountID, ownerPeerID, serviceID, activity.PeerServiceUnexposed)
|
||||
@@ -482,7 +489,7 @@ func TestDeletePeerService_SourcePeerValidation(t *testing.T) {
|
||||
return fn(txMock)
|
||||
})
|
||||
|
||||
mgr := &managerImpl{
|
||||
mgr := &Manager{
|
||||
store: mockStore,
|
||||
}
|
||||
|
||||
@@ -511,7 +518,7 @@ func TestDeletePeerService_SourcePeerValidation(t *testing.T) {
|
||||
return fn(txMock)
|
||||
})
|
||||
|
||||
mgr := &managerImpl{
|
||||
mgr := &Manager{
|
||||
store: mockStore,
|
||||
}
|
||||
|
||||
@@ -553,10 +560,14 @@ func TestDeletePeerService_SourcePeerValidation(t *testing.T) {
|
||||
GetPeerByID(ctx, store.LockingStrengthNone, accountID, ownerPeerID).
|
||||
Return(testPeer, nil)
|
||||
|
||||
mgr := &managerImpl{
|
||||
store: mockStore,
|
||||
accountManager: mockAccountMgr,
|
||||
proxyGRPCServer: newProxyServer(t),
|
||||
mgr := &Manager{
|
||||
store: mockStore,
|
||||
accountManager: mockAccountMgr,
|
||||
proxyController: func() proxy.Controller {
|
||||
c, err := proxymanager.NewGRPCController(newProxyServer(t), noop.NewMeterProvider().Meter(""))
|
||||
require.NoError(t, err)
|
||||
return c
|
||||
}(),
|
||||
}
|
||||
|
||||
err := mgr.deletePeerService(ctx, accountID, ownerPeerID, serviceID, activity.PeerServiceExposeExpired)
|
||||
@@ -593,10 +604,14 @@ func TestDeletePeerService_SourcePeerValidation(t *testing.T) {
|
||||
GetPeerByID(ctx, store.LockingStrengthNone, accountID, ownerPeerID).
|
||||
Return(testPeer, nil)
|
||||
|
||||
mgr := &managerImpl{
|
||||
store: mockStore,
|
||||
accountManager: mockAccountMgr,
|
||||
proxyGRPCServer: newProxyServer(t),
|
||||
mgr := &Manager{
|
||||
store: mockStore,
|
||||
accountManager: mockAccountMgr,
|
||||
proxyController: func() proxy.Controller {
|
||||
c, err := proxymanager.NewGRPCController(newProxyServer(t), noop.NewMeterProvider().Meter(""))
|
||||
require.NoError(t, err)
|
||||
return c
|
||||
}(),
|
||||
}
|
||||
|
||||
err := mgr.deletePeerService(ctx, accountID, ownerPeerID, serviceID, activity.PeerServiceUnexposed)
|
||||
@@ -609,19 +624,6 @@ func TestDeletePeerService_SourcePeerValidation(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
// noopExtraSettings is a minimal extra_settings.Manager for tests without external integrations.
|
||||
type noopExtraSettings struct{}
|
||||
|
||||
func (n *noopExtraSettings) GetExtraSettings(_ context.Context, _ string) (*types.ExtraSettings, error) {
|
||||
return &types.ExtraSettings{}, nil
|
||||
}
|
||||
|
||||
func (n *noopExtraSettings) UpdateExtraSettings(_ context.Context, _, _ string, _ *types.ExtraSettings) (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
var _ extra_settings.Manager = (*noopExtraSettings)(nil)
|
||||
|
||||
// testClusterDeriver is a minimal ClusterDeriver that returns a fixed domain list.
|
||||
type testClusterDeriver struct {
|
||||
domains []string
|
||||
@@ -643,7 +645,7 @@ const (
|
||||
)
|
||||
|
||||
// setupIntegrationTest creates a real SQLite store with seeded test data for integration tests.
|
||||
func setupIntegrationTest(t *testing.T) (*managerImpl, store.Store) {
|
||||
func setupIntegrationTest(t *testing.T) (*Manager, store.Store) {
|
||||
t.Helper()
|
||||
|
||||
ctx := context.Background()
|
||||
@@ -691,35 +693,34 @@ func setupIntegrationTest(t *testing.T) (*managerImpl, store.Store) {
|
||||
require.NoError(t, err)
|
||||
|
||||
permsMgr := permissions.NewManager(testStore)
|
||||
usersMgr := users.NewManager(testStore)
|
||||
settingsMgr := settings.NewManager(testStore, usersMgr, &noopExtraSettings{}, permsMgr, settings.IdpConfig{})
|
||||
|
||||
var storedEvents []activity.Activity
|
||||
accountMgr := &mock_server.MockAccountManager{
|
||||
StoreEventFunc: func(_ context.Context, _, _, _ string, activityID activity.ActivityDescriber, _ map[string]any) {
|
||||
storedEvents = append(storedEvents, activityID.(activity.Activity))
|
||||
},
|
||||
StoreEventFunc: func(_ context.Context, _, _, _ string, _ activity.ActivityDescriber, _ map[string]any) {},
|
||||
UpdateAccountPeersFunc: func(_ context.Context, _ string) {},
|
||||
GetGroupByNameFunc: func(ctx context.Context, accountID, groupName string) (*types.Group, error) {
|
||||
return testStore.GetGroupByName(ctx, store.LockingStrengthNone, groupName, accountID)
|
||||
},
|
||||
}
|
||||
|
||||
tokenStore := nbgrpc.NewOneTimeTokenStore(1 * time.Hour)
|
||||
proxySrv := nbgrpc.NewProxyServiceServer(nil, tokenStore, nbgrpc.ProxyOIDCConfig{}, nil, nil)
|
||||
t.Cleanup(proxySrv.Close)
|
||||
tokenStore, err := nbgrpc.NewOneTimeTokenStore(ctx, 1*time.Hour, 10*time.Minute, 100)
|
||||
require.NoError(t, err)
|
||||
pkceStore, err := nbgrpc.NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100)
|
||||
require.NoError(t, err)
|
||||
proxySrv := nbgrpc.NewProxyServiceServer(nil, tokenStore, pkceStore, nbgrpc.ProxyOIDCConfig{}, nil, nil, nil)
|
||||
|
||||
mgr := &managerImpl{
|
||||
proxyController, err := proxymanager.NewGRPCController(proxySrv, noop.NewMeterProvider().Meter(""))
|
||||
require.NoError(t, err)
|
||||
|
||||
mgr := &Manager{
|
||||
store: testStore,
|
||||
accountManager: accountMgr,
|
||||
permissionsManager: permsMgr,
|
||||
settingsManager: settingsMgr,
|
||||
proxyGRPCServer: proxySrv,
|
||||
proxyController: proxyController,
|
||||
clusterDeriver: &testClusterDeriver{
|
||||
domains: []string{"test.netbird.io"},
|
||||
},
|
||||
}
|
||||
mgr.exposeTracker = &exposeTracker{manager: mgr}
|
||||
mgr.exposeReaper = &exposeReaper{manager: mgr}
|
||||
|
||||
return mgr, testStore
|
||||
}
|
||||
@@ -788,7 +789,7 @@ func Test_validateExposePermission(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
mockStore := store.NewMockStore(ctrl)
|
||||
mockStore.EXPECT().GetAccountSettings(gomock.Any(), gomock.Any(), testAccountID).Return(nil, errors.New("store error"))
|
||||
mgr := &managerImpl{store: mockStore}
|
||||
mgr := &Manager{store: mockStore}
|
||||
err := mgr.validateExposePermission(ctx, testAccountID, testPeerID)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "get account settings")
|
||||
@@ -801,7 +802,7 @@ func TestCreateServiceFromPeer(t *testing.T) {
|
||||
t.Run("creates service with random domain", func(t *testing.T) {
|
||||
mgr, testStore := setupIntegrationTest(t)
|
||||
|
||||
req := &reverseproxy.ExposeServiceRequest{
|
||||
req := &rpservice.ExposeServiceRequest{
|
||||
Port: 8080,
|
||||
Protocol: "http",
|
||||
}
|
||||
@@ -813,10 +814,10 @@ func TestCreateServiceFromPeer(t *testing.T) {
|
||||
assert.NotEmpty(t, resp.ServiceURL, "service URL should be set")
|
||||
|
||||
// Verify service is persisted in store
|
||||
persisted, err := testStore.GetServiceByDomain(ctx, testAccountID, resp.Domain)
|
||||
persisted, err := testStore.GetServiceByDomain(ctx, resp.Domain)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, resp.Domain, persisted.Domain)
|
||||
assert.Equal(t, reverseproxy.SourceEphemeral, persisted.Source, "source should be ephemeral")
|
||||
assert.Equal(t, rpservice.SourceEphemeral, persisted.Source, "source should be ephemeral")
|
||||
assert.Equal(t, testPeerID, persisted.SourcePeer, "source peer should be set")
|
||||
assert.NotNil(t, persisted.Meta.LastRenewedAt, "last renewed should be set")
|
||||
})
|
||||
@@ -824,7 +825,7 @@ func TestCreateServiceFromPeer(t *testing.T) {
|
||||
t.Run("creates service with custom domain", func(t *testing.T) {
|
||||
mgr, _ := setupIntegrationTest(t)
|
||||
|
||||
req := &reverseproxy.ExposeServiceRequest{
|
||||
req := &rpservice.ExposeServiceRequest{
|
||||
Port: 80,
|
||||
Protocol: "http",
|
||||
Domain: "example.com",
|
||||
@@ -845,7 +846,7 @@ func TestCreateServiceFromPeer(t *testing.T) {
|
||||
err = testStore.SaveAccountSettings(ctx, testAccountID, s)
|
||||
require.NoError(t, err)
|
||||
|
||||
req := &reverseproxy.ExposeServiceRequest{
|
||||
req := &rpservice.ExposeServiceRequest{
|
||||
Port: 8080,
|
||||
Protocol: "http",
|
||||
}
|
||||
@@ -858,7 +859,7 @@ func TestCreateServiceFromPeer(t *testing.T) {
|
||||
t.Run("validates request fields", func(t *testing.T) {
|
||||
mgr, _ := setupIntegrationTest(t)
|
||||
|
||||
req := &reverseproxy.ExposeServiceRequest{
|
||||
req := &rpservice.ExposeServiceRequest{
|
||||
Port: 0,
|
||||
Protocol: "http",
|
||||
}
|
||||
@@ -872,67 +873,67 @@ func TestCreateServiceFromPeer(t *testing.T) {
|
||||
func TestExposeServiceRequestValidate(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
req reverseproxy.ExposeServiceRequest
|
||||
req rpservice.ExposeServiceRequest
|
||||
wantErr string
|
||||
}{
|
||||
{
|
||||
name: "valid http request",
|
||||
req: reverseproxy.ExposeServiceRequest{Port: 8080, Protocol: "http"},
|
||||
req: rpservice.ExposeServiceRequest{Port: 8080, Protocol: "http"},
|
||||
wantErr: "",
|
||||
},
|
||||
{
|
||||
name: "valid https request with pin",
|
||||
req: reverseproxy.ExposeServiceRequest{Port: 443, Protocol: "https", Pin: "123456"},
|
||||
req: rpservice.ExposeServiceRequest{Port: 443, Protocol: "https", Pin: "123456"},
|
||||
wantErr: "",
|
||||
},
|
||||
{
|
||||
name: "port zero rejected",
|
||||
req: reverseproxy.ExposeServiceRequest{Port: 0, Protocol: "http"},
|
||||
req: rpservice.ExposeServiceRequest{Port: 0, Protocol: "http"},
|
||||
wantErr: "port must be between 1 and 65535",
|
||||
},
|
||||
{
|
||||
name: "negative port rejected",
|
||||
req: reverseproxy.ExposeServiceRequest{Port: -1, Protocol: "http"},
|
||||
req: rpservice.ExposeServiceRequest{Port: -1, Protocol: "http"},
|
||||
wantErr: "port must be between 1 and 65535",
|
||||
},
|
||||
{
|
||||
name: "port above 65535 rejected",
|
||||
req: reverseproxy.ExposeServiceRequest{Port: 65536, Protocol: "http"},
|
||||
req: rpservice.ExposeServiceRequest{Port: 65536, Protocol: "http"},
|
||||
wantErr: "port must be between 1 and 65535",
|
||||
},
|
||||
{
|
||||
name: "unsupported protocol",
|
||||
req: reverseproxy.ExposeServiceRequest{Port: 80, Protocol: "tcp"},
|
||||
req: rpservice.ExposeServiceRequest{Port: 80, Protocol: "tcp"},
|
||||
wantErr: "unsupported protocol",
|
||||
},
|
||||
{
|
||||
name: "invalid pin format",
|
||||
req: reverseproxy.ExposeServiceRequest{Port: 80, Protocol: "http", Pin: "abc"},
|
||||
req: rpservice.ExposeServiceRequest{Port: 80, Protocol: "http", Pin: "abc"},
|
||||
wantErr: "invalid pin",
|
||||
},
|
||||
{
|
||||
name: "pin too short",
|
||||
req: reverseproxy.ExposeServiceRequest{Port: 80, Protocol: "http", Pin: "12345"},
|
||||
req: rpservice.ExposeServiceRequest{Port: 80, Protocol: "http", Pin: "12345"},
|
||||
wantErr: "invalid pin",
|
||||
},
|
||||
{
|
||||
name: "valid 6-digit pin",
|
||||
req: reverseproxy.ExposeServiceRequest{Port: 80, Protocol: "http", Pin: "000000"},
|
||||
req: rpservice.ExposeServiceRequest{Port: 80, Protocol: "http", Pin: "000000"},
|
||||
wantErr: "",
|
||||
},
|
||||
{
|
||||
name: "empty user group name",
|
||||
req: reverseproxy.ExposeServiceRequest{Port: 80, Protocol: "http", UserGroups: []string{"valid", ""}},
|
||||
req: rpservice.ExposeServiceRequest{Port: 80, Protocol: "http", UserGroups: []string{"valid", ""}},
|
||||
wantErr: "user group name cannot be empty",
|
||||
},
|
||||
{
|
||||
name: "invalid name prefix",
|
||||
req: reverseproxy.ExposeServiceRequest{Port: 80, Protocol: "http", NamePrefix: "INVALID"},
|
||||
req: rpservice.ExposeServiceRequest{Port: 80, Protocol: "http", NamePrefix: "INVALID"},
|
||||
wantErr: "invalid name prefix",
|
||||
},
|
||||
{
|
||||
name: "valid name prefix",
|
||||
req: reverseproxy.ExposeServiceRequest{Port: 80, Protocol: "http", NamePrefix: "my-service"},
|
||||
req: rpservice.ExposeServiceRequest{Port: 80, Protocol: "http", NamePrefix: "my-service"},
|
||||
wantErr: "",
|
||||
},
|
||||
}
|
||||
@@ -950,7 +951,7 @@ func TestExposeServiceRequestValidate(t *testing.T) {
|
||||
}
|
||||
|
||||
t.Run("nil receiver", func(t *testing.T) {
|
||||
var req *reverseproxy.ExposeServiceRequest
|
||||
var req *rpservice.ExposeServiceRequest
|
||||
err := req.Validate()
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "request cannot be nil")
|
||||
@@ -964,7 +965,7 @@ func TestDeleteServiceFromPeer_ByDomain(t *testing.T) {
|
||||
mgr, testStore := setupIntegrationTest(t)
|
||||
|
||||
// First create a service
|
||||
req := &reverseproxy.ExposeServiceRequest{
|
||||
req := &rpservice.ExposeServiceRequest{
|
||||
Port: 8080,
|
||||
Protocol: "http",
|
||||
}
|
||||
@@ -976,14 +977,14 @@ func TestDeleteServiceFromPeer_ByDomain(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify service is deleted
|
||||
_, err = testStore.GetServiceByDomain(ctx, testAccountID, resp.Domain)
|
||||
_, err = testStore.GetServiceByDomain(ctx, resp.Domain)
|
||||
require.Error(t, err, "service should be deleted")
|
||||
})
|
||||
|
||||
t.Run("expire uses correct activity", func(t *testing.T) {
|
||||
mgr, _ := setupIntegrationTest(t)
|
||||
|
||||
req := &reverseproxy.ExposeServiceRequest{
|
||||
req := &rpservice.ExposeServiceRequest{
|
||||
Port: 8080,
|
||||
Protocol: "http",
|
||||
}
|
||||
@@ -1001,7 +1002,7 @@ func TestStopServiceFromPeer(t *testing.T) {
|
||||
t.Run("stops service by domain", func(t *testing.T) {
|
||||
mgr, testStore := setupIntegrationTest(t)
|
||||
|
||||
req := &reverseproxy.ExposeServiceRequest{
|
||||
req := &rpservice.ExposeServiceRequest{
|
||||
Port: 8080,
|
||||
Protocol: "http",
|
||||
}
|
||||
@@ -1011,58 +1012,64 @@ func TestStopServiceFromPeer(t *testing.T) {
|
||||
err = mgr.StopServiceFromPeer(ctx, testAccountID, testPeerID, resp.Domain)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = testStore.GetServiceByDomain(ctx, testAccountID, resp.Domain)
|
||||
_, err = testStore.GetServiceByDomain(ctx, resp.Domain)
|
||||
require.Error(t, err, "service should be deleted")
|
||||
})
|
||||
}
|
||||
|
||||
func TestDeleteService_UntracksEphemeralExpose(t *testing.T) {
|
||||
func TestDeleteService_DeletesEphemeralExpose(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
mgr, _ := setupIntegrationTest(t)
|
||||
mgr, testStore := setupIntegrationTest(t)
|
||||
|
||||
resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &reverseproxy.ExposeServiceRequest{
|
||||
resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{
|
||||
Port: 8080,
|
||||
Protocol: "http",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1, mgr.exposeTracker.CountPeerExposes(testPeerID), "expose should be tracked after create")
|
||||
|
||||
// Look up the service by domain to get its store ID
|
||||
svc, err := mgr.store.GetServiceByDomain(ctx, testAccountID, resp.Domain)
|
||||
count, err := mgr.store.CountEphemeralServicesByPeer(ctx, store.LockingStrengthNone, testAccountID, testPeerID)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(1), count, "one ephemeral service should exist after create")
|
||||
|
||||
svc, err := testStore.GetServiceByDomain(ctx, resp.Domain)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Delete via the API path (user-initiated)
|
||||
err = mgr.DeleteService(ctx, testAccountID, testUserID, svc.ID)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, 0, mgr.exposeTracker.CountPeerExposes(testPeerID), "expose should be untracked after API delete")
|
||||
count, err = mgr.store.CountEphemeralServicesByPeer(ctx, store.LockingStrengthNone, testAccountID, testPeerID)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(0), count, "ephemeral service should be deleted after API delete")
|
||||
|
||||
// A new expose should succeed (not blocked by stale tracking)
|
||||
_, err = mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &reverseproxy.ExposeServiceRequest{
|
||||
_, err = mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{
|
||||
Port: 9090,
|
||||
Protocol: "http",
|
||||
})
|
||||
assert.NoError(t, err, "new expose should succeed after API delete cleared tracking")
|
||||
assert.NoError(t, err, "new expose should succeed after API delete")
|
||||
}
|
||||
|
||||
func TestDeleteAllServices_UntracksEphemeralExposes(t *testing.T) {
|
||||
func TestDeleteAllServices_DeletesEphemeralExposes(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
mgr, _ := setupIntegrationTest(t)
|
||||
|
||||
for i := range 3 {
|
||||
_, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &reverseproxy.ExposeServiceRequest{
|
||||
_, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{
|
||||
Port: 8080 + i,
|
||||
Protocol: "http",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
assert.Equal(t, 3, mgr.exposeTracker.CountPeerExposes(testPeerID), "all exposes should be tracked")
|
||||
count, err := mgr.store.CountEphemeralServicesByPeer(ctx, store.LockingStrengthNone, testAccountID, testPeerID)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(3), count, "all ephemeral services should exist")
|
||||
|
||||
err := mgr.DeleteAllServices(ctx, testAccountID, testUserID)
|
||||
err = mgr.DeleteAllServices(ctx, testAccountID, testUserID)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, 0, mgr.exposeTracker.CountPeerExposes(testPeerID), "all exposes should be untracked after DeleteAllServices")
|
||||
count, err = mgr.store.CountEphemeralServicesByPeer(ctx, store.LockingStrengthNone, testAccountID, testPeerID)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(0), count, "all ephemeral services should be deleted after DeleteAllServices")
|
||||
}
|
||||
|
||||
func TestRenewServiceFromPeer(t *testing.T) {
|
||||
@@ -1071,7 +1078,7 @@ func TestRenewServiceFromPeer(t *testing.T) {
|
||||
t.Run("renews tracked expose", func(t *testing.T) {
|
||||
mgr, _ := setupIntegrationTest(t)
|
||||
|
||||
resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &reverseproxy.ExposeServiceRequest{
|
||||
resp, err := mgr.CreateServiceFromPeer(ctx, testAccountID, testPeerID, &rpservice.ExposeServiceRequest{
|
||||
Port: 8080,
|
||||
Protocol: "http",
|
||||
})
|
||||
@@ -1112,3 +1119,75 @@ func TestGetGroupIDsFromNames(t *testing.T) {
|
||||
assert.Contains(t, err.Error(), "no group names provided")
|
||||
})
|
||||
}
|
||||
|
||||
func TestDeleteService_DeletesTargets(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
accountID := "test-account"
|
||||
userID := "test-user"
|
||||
|
||||
sqlStore, err := store.NewStore(ctx, types.SqliteStoreEngine, t.TempDir(), nil, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
mockPerms := permissions.NewMockManager(ctrl)
|
||||
mockAcct := account.NewMockManager(ctrl)
|
||||
|
||||
tokenStore, err := nbgrpc.NewOneTimeTokenStore(ctx, 1*time.Hour, 10*time.Minute, 100)
|
||||
require.NoError(t, err)
|
||||
pkceStore, err := nbgrpc.NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100)
|
||||
require.NoError(t, err)
|
||||
proxySrv := nbgrpc.NewProxyServiceServer(nil, tokenStore, pkceStore, nbgrpc.ProxyOIDCConfig{}, nil, nil, nil)
|
||||
|
||||
proxyController, err := proxymanager.NewGRPCController(proxySrv, noop.NewMeterProvider().Meter(""))
|
||||
require.NoError(t, err)
|
||||
|
||||
mgr := &Manager{
|
||||
store: sqlStore,
|
||||
permissionsManager: mockPerms,
|
||||
accountManager: mockAcct,
|
||||
proxyController: proxyController,
|
||||
}
|
||||
|
||||
service := &rpservice.Service{
|
||||
ID: "service-1",
|
||||
AccountID: accountID,
|
||||
Domain: "test.example.com",
|
||||
ProxyCluster: "cluster1",
|
||||
Enabled: true,
|
||||
Targets: []*rpservice.Target{
|
||||
{AccountID: accountID, ServiceID: "service-1", TargetType: rpservice.TargetTypePeer, TargetId: "peer-1"},
|
||||
{AccountID: accountID, ServiceID: "service-1", TargetType: rpservice.TargetTypePeer, TargetId: "peer-2"},
|
||||
{AccountID: accountID, ServiceID: "service-1", TargetType: rpservice.TargetTypePeer, TargetId: "peer-3"},
|
||||
},
|
||||
}
|
||||
|
||||
err = sqlStore.CreateService(ctx, service)
|
||||
require.NoError(t, err)
|
||||
|
||||
retrievedService, err := sqlStore.GetServiceByID(ctx, store.LockingStrengthNone, accountID, service.ID)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, retrievedService.Targets, 3, "Service should have 3 targets before deletion")
|
||||
|
||||
mockPerms.EXPECT().
|
||||
ValidateUserPermissions(ctx, accountID, userID, modules.Services, operations.Delete).
|
||||
Return(true, nil)
|
||||
mockAcct.EXPECT().
|
||||
StoreEvent(ctx, userID, service.ID, accountID, activity.ServiceDeleted, gomock.Any())
|
||||
mockAcct.EXPECT().
|
||||
UpdateAccountPeers(ctx, accountID)
|
||||
|
||||
err = mgr.DeleteService(ctx, accountID, userID, service.ID)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = sqlStore.GetServiceByID(ctx, store.LockingStrengthNone, accountID, service.ID)
|
||||
require.Error(t, err)
|
||||
s, ok := status.FromError(err)
|
||||
require.True(t, ok)
|
||||
assert.Equal(t, status.NotFound, s.Type())
|
||||
|
||||
targets, err := sqlStore.GetTargetsByServiceID(ctx, store.LockingStrengthNone, accountID, service.ID)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, targets, 0, "All targets should be deleted when service is deleted")
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package reverseproxy
|
||||
package service
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
@@ -6,14 +6,18 @@ import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rs/xid"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"google.golang.org/protobuf/types/known/durationpb"
|
||||
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy"
|
||||
"github.com/netbirdio/netbird/shared/hash/argon2id"
|
||||
"github.com/netbirdio/netbird/util/crypt"
|
||||
|
||||
@@ -29,15 +33,15 @@ const (
|
||||
Delete Operation = "delete"
|
||||
)
|
||||
|
||||
type ProxyStatus string
|
||||
type Status string
|
||||
|
||||
const (
|
||||
StatusPending ProxyStatus = "pending"
|
||||
StatusActive ProxyStatus = "active"
|
||||
StatusTunnelNotCreated ProxyStatus = "tunnel_not_created"
|
||||
StatusCertificatePending ProxyStatus = "certificate_pending"
|
||||
StatusCertificateFailed ProxyStatus = "certificate_failed"
|
||||
StatusError ProxyStatus = "error"
|
||||
StatusPending Status = "pending"
|
||||
StatusActive Status = "active"
|
||||
StatusTunnelNotCreated Status = "tunnel_not_created"
|
||||
StatusCertificatePending Status = "certificate_pending"
|
||||
StatusCertificateFailed Status = "certificate_failed"
|
||||
StatusError Status = "error"
|
||||
|
||||
TargetTypePeer = "peer"
|
||||
TargetTypeHost = "host"
|
||||
@@ -48,17 +52,25 @@ const (
|
||||
SourceEphemeral = "ephemeral"
|
||||
)
|
||||
|
||||
type TargetOptions struct {
|
||||
SkipTLSVerify bool `json:"skip_tls_verify"`
|
||||
RequestTimeout time.Duration `json:"request_timeout,omitempty"`
|
||||
PathRewrite PathRewriteMode `json:"path_rewrite,omitempty"`
|
||||
CustomHeaders map[string]string `gorm:"serializer:json" json:"custom_headers,omitempty"`
|
||||
}
|
||||
|
||||
type Target struct {
|
||||
ID uint `gorm:"primaryKey" json:"-"`
|
||||
AccountID string `gorm:"index:idx_target_account;not null" json:"-"`
|
||||
ServiceID string `gorm:"index:idx_service_targets;not null" json:"-"`
|
||||
Path *string `json:"path,omitempty"`
|
||||
Host string `json:"host"` // the Host field is only used for subnet targets, otherwise ignored
|
||||
Port int `gorm:"index:idx_target_port" json:"port"`
|
||||
Protocol string `gorm:"index:idx_target_protocol" json:"protocol"`
|
||||
TargetId string `gorm:"index:idx_target_id" json:"target_id"`
|
||||
TargetType string `gorm:"index:idx_target_type" json:"target_type"`
|
||||
Enabled bool `gorm:"index:idx_target_enabled" json:"enabled"`
|
||||
ID uint `gorm:"primaryKey" json:"-"`
|
||||
AccountID string `gorm:"index:idx_target_account;not null" json:"-"`
|
||||
ServiceID string `gorm:"index:idx_service_targets;not null" json:"-"`
|
||||
Path *string `json:"path,omitempty"`
|
||||
Host string `json:"host"` // the Host field is only used for subnet targets, otherwise ignored
|
||||
Port int `gorm:"index:idx_target_port" json:"port"`
|
||||
Protocol string `gorm:"index:idx_target_protocol" json:"protocol"`
|
||||
TargetId string `gorm:"index:idx_target_id" json:"target_id"`
|
||||
TargetType string `gorm:"index:idx_target_type" json:"target_type"`
|
||||
Enabled bool `gorm:"index:idx_target_enabled" json:"enabled"`
|
||||
Options TargetOptions `gorm:"embedded" json:"options"`
|
||||
}
|
||||
|
||||
type PasswordAuthConfig struct {
|
||||
@@ -111,14 +123,7 @@ func (a *AuthConfig) ClearSecrets() {
|
||||
}
|
||||
}
|
||||
|
||||
type OIDCValidationConfig struct {
|
||||
Issuer string
|
||||
Audiences []string
|
||||
KeysLocation string
|
||||
MaxTokenAgeSeconds int64
|
||||
}
|
||||
|
||||
type ServiceMeta struct {
|
||||
type Meta struct {
|
||||
CreatedAt time.Time
|
||||
CertificateIssuedAt *time.Time
|
||||
Status string
|
||||
@@ -129,18 +134,18 @@ type Service struct {
|
||||
ID string `gorm:"primaryKey"`
|
||||
AccountID string `gorm:"index"`
|
||||
Name string
|
||||
Domain string `gorm:"index"`
|
||||
Domain string `gorm:"type:varchar(255);uniqueIndex"`
|
||||
ProxyCluster string `gorm:"index"`
|
||||
Targets []*Target `gorm:"foreignKey:ServiceID;constraint:OnDelete:CASCADE"`
|
||||
Enabled bool
|
||||
PassHostHeader bool
|
||||
RewriteRedirects bool
|
||||
Auth AuthConfig `gorm:"serializer:json"`
|
||||
Meta ServiceMeta `gorm:"embedded;embeddedPrefix:meta_"`
|
||||
SessionPrivateKey string `gorm:"column:session_private_key"`
|
||||
SessionPublicKey string `gorm:"column:session_public_key"`
|
||||
Source string `gorm:"default:'permanent'"`
|
||||
SourcePeer string
|
||||
Auth AuthConfig `gorm:"serializer:json"`
|
||||
Meta Meta `gorm:"embedded;embeddedPrefix:meta_"`
|
||||
SessionPrivateKey string `gorm:"column:session_private_key"`
|
||||
SessionPublicKey string `gorm:"column:session_public_key"`
|
||||
Source string `gorm:"default:'permanent';index:idx_service_source_peer"`
|
||||
SourcePeer string `gorm:"index:idx_service_source_peer"`
|
||||
}
|
||||
|
||||
func NewService(accountID, name, domain, proxyCluster string, targets []*Target, enabled bool) *Service {
|
||||
@@ -165,7 +170,7 @@ func NewService(accountID, name, domain, proxyCluster string, targets []*Target,
|
||||
// only be called during initial creation, not for updates.
|
||||
func (s *Service) InitNewRecord() {
|
||||
s.ID = xid.New().String()
|
||||
s.Meta = ServiceMeta{
|
||||
s.Meta = Meta{
|
||||
CreatedAt: time.Now(),
|
||||
Status: string(StatusPending),
|
||||
}
|
||||
@@ -200,7 +205,7 @@ func (s *Service) ToAPIResponse() *api.Service {
|
||||
// Convert internal targets to API targets
|
||||
apiTargets := make([]api.ServiceTarget, 0, len(s.Targets))
|
||||
for _, target := range s.Targets {
|
||||
apiTargets = append(apiTargets, api.ServiceTarget{
|
||||
st := api.ServiceTarget{
|
||||
Path: target.Path,
|
||||
Host: &target.Host,
|
||||
Port: target.Port,
|
||||
@@ -208,7 +213,9 @@ func (s *Service) ToAPIResponse() *api.Service {
|
||||
TargetId: target.TargetId,
|
||||
TargetType: api.ServiceTargetTargetType(target.TargetType),
|
||||
Enabled: target.Enabled,
|
||||
})
|
||||
}
|
||||
st.Options = targetOptionsToAPI(target.Options)
|
||||
apiTargets = append(apiTargets, st)
|
||||
}
|
||||
|
||||
meta := api.ServiceMeta{
|
||||
@@ -239,7 +246,7 @@ func (s *Service) ToAPIResponse() *api.Service {
|
||||
return resp
|
||||
}
|
||||
|
||||
func (s *Service) ToProtoMapping(operation Operation, authToken string, oidcConfig OIDCValidationConfig) *proto.ProxyMapping {
|
||||
func (s *Service) ToProtoMapping(operation Operation, authToken string, oidcConfig proxy.OIDCValidationConfig) *proto.ProxyMapping {
|
||||
pathMappings := make([]*proto.PathMapping, 0, len(s.Targets))
|
||||
for _, target := range s.Targets {
|
||||
if !target.Enabled {
|
||||
@@ -262,10 +269,14 @@ func (s *Service) ToProtoMapping(operation Operation, authToken string, oidcConf
|
||||
if target.Path != nil {
|
||||
path = *target.Path
|
||||
}
|
||||
pathMappings = append(pathMappings, &proto.PathMapping{
|
||||
|
||||
pm := &proto.PathMapping{
|
||||
Path: path,
|
||||
Target: targetURL.String(),
|
||||
})
|
||||
}
|
||||
|
||||
pm.Options = targetOptionsToProto(target.Options)
|
||||
pathMappings = append(pathMappings, pm)
|
||||
}
|
||||
|
||||
auth := &proto.Authentication{
|
||||
@@ -318,13 +329,87 @@ func isDefaultPort(scheme string, port int) bool {
|
||||
return (scheme == "https" && port == 443) || (scheme == "http" && port == 80)
|
||||
}
|
||||
|
||||
func (s *Service) FromAPIRequest(req *api.ServiceRequest, accountID string) {
|
||||
// PathRewriteMode controls how the request path is rewritten before forwarding.
|
||||
type PathRewriteMode string
|
||||
|
||||
const (
|
||||
PathRewritePreserve PathRewriteMode = "preserve"
|
||||
)
|
||||
|
||||
func pathRewriteToProto(mode PathRewriteMode) proto.PathRewriteMode {
|
||||
switch mode {
|
||||
case PathRewritePreserve:
|
||||
return proto.PathRewriteMode_PATH_REWRITE_PRESERVE
|
||||
default:
|
||||
return proto.PathRewriteMode_PATH_REWRITE_DEFAULT
|
||||
}
|
||||
}
|
||||
|
||||
func targetOptionsToAPI(opts TargetOptions) *api.ServiceTargetOptions {
|
||||
if !opts.SkipTLSVerify && opts.RequestTimeout == 0 && opts.PathRewrite == "" && len(opts.CustomHeaders) == 0 {
|
||||
return nil
|
||||
}
|
||||
apiOpts := &api.ServiceTargetOptions{}
|
||||
if opts.SkipTLSVerify {
|
||||
apiOpts.SkipTlsVerify = &opts.SkipTLSVerify
|
||||
}
|
||||
if opts.RequestTimeout != 0 {
|
||||
s := opts.RequestTimeout.String()
|
||||
apiOpts.RequestTimeout = &s
|
||||
}
|
||||
if opts.PathRewrite != "" {
|
||||
pr := api.ServiceTargetOptionsPathRewrite(opts.PathRewrite)
|
||||
apiOpts.PathRewrite = &pr
|
||||
}
|
||||
if len(opts.CustomHeaders) > 0 {
|
||||
apiOpts.CustomHeaders = &opts.CustomHeaders
|
||||
}
|
||||
return apiOpts
|
||||
}
|
||||
|
||||
func targetOptionsToProto(opts TargetOptions) *proto.PathTargetOptions {
|
||||
if !opts.SkipTLSVerify && opts.PathRewrite == "" && opts.RequestTimeout == 0 && len(opts.CustomHeaders) == 0 {
|
||||
return nil
|
||||
}
|
||||
popts := &proto.PathTargetOptions{
|
||||
SkipTlsVerify: opts.SkipTLSVerify,
|
||||
PathRewrite: pathRewriteToProto(opts.PathRewrite),
|
||||
CustomHeaders: opts.CustomHeaders,
|
||||
}
|
||||
if opts.RequestTimeout != 0 {
|
||||
popts.RequestTimeout = durationpb.New(opts.RequestTimeout)
|
||||
}
|
||||
return popts
|
||||
}
|
||||
|
||||
func targetOptionsFromAPI(idx int, o *api.ServiceTargetOptions) (TargetOptions, error) {
|
||||
var opts TargetOptions
|
||||
if o.SkipTlsVerify != nil {
|
||||
opts.SkipTLSVerify = *o.SkipTlsVerify
|
||||
}
|
||||
if o.RequestTimeout != nil {
|
||||
d, err := time.ParseDuration(*o.RequestTimeout)
|
||||
if err != nil {
|
||||
return opts, fmt.Errorf("target %d: parse request_timeout %q: %w", idx, *o.RequestTimeout, err)
|
||||
}
|
||||
opts.RequestTimeout = d
|
||||
}
|
||||
if o.PathRewrite != nil {
|
||||
opts.PathRewrite = PathRewriteMode(*o.PathRewrite)
|
||||
}
|
||||
if o.CustomHeaders != nil {
|
||||
opts.CustomHeaders = *o.CustomHeaders
|
||||
}
|
||||
return opts, nil
|
||||
}
|
||||
|
||||
func (s *Service) FromAPIRequest(req *api.ServiceRequest, accountID string) error {
|
||||
s.Name = req.Name
|
||||
s.Domain = req.Domain
|
||||
s.AccountID = accountID
|
||||
|
||||
targets := make([]*Target, 0, len(req.Targets))
|
||||
for _, apiTarget := range req.Targets {
|
||||
for i, apiTarget := range req.Targets {
|
||||
target := &Target{
|
||||
AccountID: accountID,
|
||||
Path: apiTarget.Path,
|
||||
@@ -337,6 +422,13 @@ func (s *Service) FromAPIRequest(req *api.ServiceRequest, accountID string) {
|
||||
if apiTarget.Host != nil {
|
||||
target.Host = *apiTarget.Host
|
||||
}
|
||||
if apiTarget.Options != nil {
|
||||
opts, err := targetOptionsFromAPI(i, apiTarget.Options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
target.Options = opts
|
||||
}
|
||||
targets = append(targets, target)
|
||||
}
|
||||
s.Targets = targets
|
||||
@@ -374,6 +466,8 @@ func (s *Service) FromAPIRequest(req *api.ServiceRequest, accountID string) {
|
||||
}
|
||||
s.Auth.BearerAuth = bearerAuth
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) Validate() error {
|
||||
@@ -406,11 +500,113 @@ func (s *Service) Validate() error {
|
||||
if target.TargetId == "" {
|
||||
return fmt.Errorf("target %d has empty target_id", i)
|
||||
}
|
||||
if err := validateTargetOptions(i, &target.Options); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
const (
|
||||
maxRequestTimeout = 5 * time.Minute
|
||||
maxCustomHeaders = 16
|
||||
maxHeaderKeyLen = 128
|
||||
maxHeaderValueLen = 4096
|
||||
)
|
||||
|
||||
// httpHeaderNameRe matches valid HTTP header field names per RFC 7230 token definition.
|
||||
var httpHeaderNameRe = regexp.MustCompile(`^[!#$%&'*+\-.^_` + "`" + `|~0-9A-Za-z]+$`)
|
||||
|
||||
// hopByHopHeaders are headers that must not be set as custom headers
|
||||
// because they are connection-level and stripped by the proxy.
|
||||
var hopByHopHeaders = map[string]struct{}{
|
||||
"Connection": {},
|
||||
"Keep-Alive": {},
|
||||
"Proxy-Authenticate": {},
|
||||
"Proxy-Authorization": {},
|
||||
"Proxy-Connection": {},
|
||||
"Te": {},
|
||||
"Trailer": {},
|
||||
"Transfer-Encoding": {},
|
||||
"Upgrade": {},
|
||||
}
|
||||
|
||||
// reservedHeaders are set authoritatively by the proxy or control HTTP framing
|
||||
// and cannot be overridden.
|
||||
var reservedHeaders = map[string]struct{}{
|
||||
"Content-Length": {},
|
||||
"Content-Type": {},
|
||||
"Cookie": {},
|
||||
"Forwarded": {},
|
||||
"X-Forwarded-For": {},
|
||||
"X-Forwarded-Host": {},
|
||||
"X-Forwarded-Port": {},
|
||||
"X-Forwarded-Proto": {},
|
||||
"X-Real-Ip": {},
|
||||
}
|
||||
|
||||
func validateTargetOptions(idx int, opts *TargetOptions) error {
|
||||
if opts.PathRewrite != "" && opts.PathRewrite != PathRewritePreserve {
|
||||
return fmt.Errorf("target %d: unknown path_rewrite mode %q", idx, opts.PathRewrite)
|
||||
}
|
||||
|
||||
if opts.RequestTimeout != 0 {
|
||||
if opts.RequestTimeout <= 0 {
|
||||
return fmt.Errorf("target %d: request_timeout must be positive", idx)
|
||||
}
|
||||
if opts.RequestTimeout > maxRequestTimeout {
|
||||
return fmt.Errorf("target %d: request_timeout exceeds maximum of %s", idx, maxRequestTimeout)
|
||||
}
|
||||
}
|
||||
|
||||
if err := validateCustomHeaders(idx, opts.CustomHeaders); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateCustomHeaders(idx int, headers map[string]string) error {
|
||||
if len(headers) > maxCustomHeaders {
|
||||
return fmt.Errorf("target %d: custom_headers count %d exceeds maximum of %d", idx, len(headers), maxCustomHeaders)
|
||||
}
|
||||
seen := make(map[string]string, len(headers))
|
||||
for key, value := range headers {
|
||||
if !httpHeaderNameRe.MatchString(key) {
|
||||
return fmt.Errorf("target %d: custom header key %q is not a valid HTTP header name", idx, key)
|
||||
}
|
||||
if len(key) > maxHeaderKeyLen {
|
||||
return fmt.Errorf("target %d: custom header key %q exceeds maximum length of %d", idx, key, maxHeaderKeyLen)
|
||||
}
|
||||
if len(value) > maxHeaderValueLen {
|
||||
return fmt.Errorf("target %d: custom header %q value exceeds maximum length of %d", idx, key, maxHeaderValueLen)
|
||||
}
|
||||
if containsCRLF(key) || containsCRLF(value) {
|
||||
return fmt.Errorf("target %d: custom header %q contains invalid characters", idx, key)
|
||||
}
|
||||
canonical := http.CanonicalHeaderKey(key)
|
||||
if prev, ok := seen[canonical]; ok {
|
||||
return fmt.Errorf("target %d: custom header keys %q and %q collide (both canonicalize to %q)", idx, prev, key, canonical)
|
||||
}
|
||||
seen[canonical] = key
|
||||
if _, ok := hopByHopHeaders[canonical]; ok {
|
||||
return fmt.Errorf("target %d: custom header %q is a hop-by-hop header and cannot be set", idx, key)
|
||||
}
|
||||
if _, ok := reservedHeaders[canonical]; ok {
|
||||
return fmt.Errorf("target %d: custom header %q is managed by the proxy and cannot be overridden", idx, key)
|
||||
}
|
||||
if canonical == "Host" {
|
||||
return fmt.Errorf("target %d: use pass_host_header instead of setting Host as a custom header", idx)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func containsCRLF(s string) bool {
|
||||
return strings.ContainsAny(s, "\r\n")
|
||||
}
|
||||
|
||||
func (s *Service) EventMeta() map[string]any {
|
||||
return map[string]any{"name": s.Name, "domain": s.Domain, "proxy_cluster": s.ProxyCluster, "source": s.Source, "auth": s.isAuthEnabled()}
|
||||
}
|
||||
@@ -423,6 +619,12 @@ func (s *Service) Copy() *Service {
|
||||
targets := make([]*Target, len(s.Targets))
|
||||
for i, target := range s.Targets {
|
||||
targetCopy := *target
|
||||
if len(target.Options.CustomHeaders) > 0 {
|
||||
targetCopy.Options.CustomHeaders = make(map[string]string, len(target.Options.CustomHeaders))
|
||||
for k, v := range target.Options.CustomHeaders {
|
||||
targetCopy.Options.CustomHeaders[k] = v
|
||||
}
|
||||
}
|
||||
targets[i] = &targetCopy
|
||||
}
|
||||
|
||||
@@ -1,14 +1,16 @@
|
||||
package reverseproxy
|
||||
package service
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy"
|
||||
"github.com/netbirdio/netbird/shared/hash/argon2id"
|
||||
"github.com/netbirdio/netbird/shared/management/proto"
|
||||
)
|
||||
@@ -86,6 +88,188 @@ func TestValidate_MultipleTargetsOneInvalid(t *testing.T) {
|
||||
assert.Contains(t, err.Error(), "empty target_id")
|
||||
}
|
||||
|
||||
func TestValidateTargetOptions_PathRewrite(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
mode PathRewriteMode
|
||||
wantErr string
|
||||
}{
|
||||
{"empty is default", "", ""},
|
||||
{"preserve is valid", PathRewritePreserve, ""},
|
||||
{"unknown rejected", "regex", "unknown path_rewrite mode"},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
rp := validProxy()
|
||||
rp.Targets[0].Options.PathRewrite = tt.mode
|
||||
err := rp.Validate()
|
||||
if tt.wantErr == "" {
|
||||
assert.NoError(t, err)
|
||||
} else {
|
||||
assert.ErrorContains(t, err, tt.wantErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateTargetOptions_RequestTimeout(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
timeout time.Duration
|
||||
wantErr string
|
||||
}{
|
||||
{"valid 30s", 30 * time.Second, ""},
|
||||
{"valid 2m", 2 * time.Minute, ""},
|
||||
{"zero is fine", 0, ""},
|
||||
{"negative", -1 * time.Second, "must be positive"},
|
||||
{"exceeds max", 10 * time.Minute, "exceeds maximum"},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
rp := validProxy()
|
||||
rp.Targets[0].Options.RequestTimeout = tt.timeout
|
||||
err := rp.Validate()
|
||||
if tt.wantErr == "" {
|
||||
assert.NoError(t, err)
|
||||
} else {
|
||||
assert.ErrorContains(t, err, tt.wantErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateTargetOptions_CustomHeaders(t *testing.T) {
|
||||
t.Run("valid headers", func(t *testing.T) {
|
||||
rp := validProxy()
|
||||
rp.Targets[0].Options.CustomHeaders = map[string]string{
|
||||
"X-Custom": "value",
|
||||
"X-Trace": "abc123",
|
||||
}
|
||||
assert.NoError(t, rp.Validate())
|
||||
})
|
||||
|
||||
t.Run("CRLF in key", func(t *testing.T) {
|
||||
rp := validProxy()
|
||||
rp.Targets[0].Options.CustomHeaders = map[string]string{"X-Bad\r\nKey": "value"}
|
||||
assert.ErrorContains(t, rp.Validate(), "not a valid HTTP header name")
|
||||
})
|
||||
|
||||
t.Run("CRLF in value", func(t *testing.T) {
|
||||
rp := validProxy()
|
||||
rp.Targets[0].Options.CustomHeaders = map[string]string{"X-Good": "bad\nvalue"}
|
||||
assert.ErrorContains(t, rp.Validate(), "invalid characters")
|
||||
})
|
||||
|
||||
t.Run("hop-by-hop header rejected", func(t *testing.T) {
|
||||
for _, h := range []string{"Connection", "Transfer-Encoding", "Keep-Alive", "Upgrade", "Proxy-Connection"} {
|
||||
rp := validProxy()
|
||||
rp.Targets[0].Options.CustomHeaders = map[string]string{h: "value"}
|
||||
assert.ErrorContains(t, rp.Validate(), "hop-by-hop", "header %q should be rejected", h)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("reserved header rejected", func(t *testing.T) {
|
||||
for _, h := range []string{"X-Forwarded-For", "X-Real-IP", "X-Forwarded-Proto", "X-Forwarded-Host", "X-Forwarded-Port", "Cookie", "Forwarded", "Content-Length", "Content-Type"} {
|
||||
rp := validProxy()
|
||||
rp.Targets[0].Options.CustomHeaders = map[string]string{h: "value"}
|
||||
assert.ErrorContains(t, rp.Validate(), "managed by the proxy", "header %q should be rejected", h)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Host header rejected", func(t *testing.T) {
|
||||
rp := validProxy()
|
||||
rp.Targets[0].Options.CustomHeaders = map[string]string{"Host": "evil.com"}
|
||||
assert.ErrorContains(t, rp.Validate(), "pass_host_header")
|
||||
})
|
||||
|
||||
t.Run("too many headers", func(t *testing.T) {
|
||||
rp := validProxy()
|
||||
headers := make(map[string]string, 17)
|
||||
for i := range 17 {
|
||||
headers[fmt.Sprintf("X-H%d", i)] = "v"
|
||||
}
|
||||
rp.Targets[0].Options.CustomHeaders = headers
|
||||
assert.ErrorContains(t, rp.Validate(), "exceeds maximum of 16")
|
||||
})
|
||||
|
||||
t.Run("key too long", func(t *testing.T) {
|
||||
rp := validProxy()
|
||||
rp.Targets[0].Options.CustomHeaders = map[string]string{strings.Repeat("X", 129): "v"}
|
||||
assert.ErrorContains(t, rp.Validate(), "key")
|
||||
assert.ErrorContains(t, rp.Validate(), "exceeds maximum length")
|
||||
})
|
||||
|
||||
t.Run("value too long", func(t *testing.T) {
|
||||
rp := validProxy()
|
||||
rp.Targets[0].Options.CustomHeaders = map[string]string{"X-Ok": strings.Repeat("v", 4097)}
|
||||
assert.ErrorContains(t, rp.Validate(), "value exceeds maximum length")
|
||||
})
|
||||
|
||||
t.Run("duplicate canonical keys rejected", func(t *testing.T) {
|
||||
rp := validProxy()
|
||||
rp.Targets[0].Options.CustomHeaders = map[string]string{
|
||||
"x-custom": "a",
|
||||
"X-Custom": "b",
|
||||
}
|
||||
assert.ErrorContains(t, rp.Validate(), "collide")
|
||||
})
|
||||
}
|
||||
|
||||
func TestToProtoMapping_TargetOptions(t *testing.T) {
|
||||
rp := &Service{
|
||||
ID: "svc-1",
|
||||
AccountID: "acc-1",
|
||||
Domain: "example.com",
|
||||
Targets: []*Target{
|
||||
{
|
||||
TargetId: "peer-1",
|
||||
TargetType: TargetTypePeer,
|
||||
Host: "10.0.0.1",
|
||||
Port: 8080,
|
||||
Protocol: "http",
|
||||
Enabled: true,
|
||||
Options: TargetOptions{
|
||||
SkipTLSVerify: true,
|
||||
RequestTimeout: 30 * time.Second,
|
||||
PathRewrite: PathRewritePreserve,
|
||||
CustomHeaders: map[string]string{"X-Custom": "val"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
pm := rp.ToProtoMapping(Create, "token", proxy.OIDCValidationConfig{})
|
||||
require.Len(t, pm.Path, 1)
|
||||
|
||||
opts := pm.Path[0].Options
|
||||
require.NotNil(t, opts, "options should be populated")
|
||||
assert.True(t, opts.SkipTlsVerify)
|
||||
assert.Equal(t, proto.PathRewriteMode_PATH_REWRITE_PRESERVE, opts.PathRewrite)
|
||||
assert.Equal(t, map[string]string{"X-Custom": "val"}, opts.CustomHeaders)
|
||||
require.NotNil(t, opts.RequestTimeout)
|
||||
assert.Equal(t, int64(30), opts.RequestTimeout.Seconds)
|
||||
}
|
||||
|
||||
func TestToProtoMapping_NoOptionsWhenDefault(t *testing.T) {
|
||||
rp := &Service{
|
||||
ID: "svc-1",
|
||||
AccountID: "acc-1",
|
||||
Domain: "example.com",
|
||||
Targets: []*Target{
|
||||
{
|
||||
TargetId: "peer-1",
|
||||
TargetType: TargetTypePeer,
|
||||
Host: "10.0.0.1",
|
||||
Port: 8080,
|
||||
Protocol: "http",
|
||||
Enabled: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
pm := rp.ToProtoMapping(Create, "token", proxy.OIDCValidationConfig{})
|
||||
require.Len(t, pm.Path, 1)
|
||||
assert.Nil(t, pm.Path[0].Options, "options should be nil when all defaults")
|
||||
}
|
||||
|
||||
func TestIsDefaultPort(t *testing.T) {
|
||||
tests := []struct {
|
||||
scheme string
|
||||
@@ -109,7 +293,7 @@ func TestIsDefaultPort(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestToProtoMapping_PortInTargetURL(t *testing.T) {
|
||||
oidcConfig := OIDCValidationConfig{}
|
||||
oidcConfig := proxy.OIDCValidationConfig{}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -202,7 +386,7 @@ func TestToProtoMapping_DisabledTargetSkipped(t *testing.T) {
|
||||
{TargetId: "peer-2", TargetType: TargetTypePeer, Host: "10.0.0.2", Port: 9090, Protocol: "http", Enabled: true},
|
||||
},
|
||||
}
|
||||
pm := rp.ToProtoMapping(Create, "token", OIDCValidationConfig{})
|
||||
pm := rp.ToProtoMapping(Create, "token", proxy.OIDCValidationConfig{})
|
||||
require.Len(t, pm.Path, 1)
|
||||
assert.Equal(t, "http://10.0.0.2:9090/", pm.Path[0].Target)
|
||||
}
|
||||
@@ -219,7 +403,7 @@ func TestToProtoMapping_OperationTypes(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(string(tt.op), func(t *testing.T) {
|
||||
pm := rp.ToProtoMapping(tt.op, "", OIDCValidationConfig{})
|
||||
pm := rp.ToProtoMapping(tt.op, "", proxy.OIDCValidationConfig{})
|
||||
assert.Equal(t, tt.want, pm.Type)
|
||||
})
|
||||
}
|
||||
@@ -94,7 +94,7 @@ func (s *BaseServer) EventStore() activity.Store {
|
||||
|
||||
func (s *BaseServer) APIHandler() http.Handler {
|
||||
return Create(s, func() http.Handler {
|
||||
httpAPIHandler, err := nbhttp.NewAPIHandler(context.Background(), s.AccountManager(), s.NetworksManager(), s.ResourcesManager(), s.RoutesManager(), s.GroupsManager(), s.GeoLocationManager(), s.AuthManager(), s.Metrics(), s.IntegratedValidator(), s.ProxyController(), s.PermissionsManager(), s.PeersManager(), s.SettingsManager(), s.ZonesManager(), s.RecordsManager(), s.NetworkMapController(), s.IdpManager(), s.ReverseProxyManager(), s.ReverseProxyDomainManager(), s.AccessLogsManager(), s.ReverseProxyGRPCServer(), s.Config.ReverseProxy.TrustedHTTPProxies)
|
||||
httpAPIHandler, err := nbhttp.NewAPIHandler(context.Background(), s.AccountManager(), s.NetworksManager(), s.ResourcesManager(), s.RoutesManager(), s.GroupsManager(), s.GeoLocationManager(), s.AuthManager(), s.Metrics(), s.IntegratedValidator(), s.ProxyController(), s.PermissionsManager(), s.PeersManager(), s.SettingsManager(), s.ZonesManager(), s.RecordsManager(), s.NetworkMapController(), s.IdpManager(), s.ServiceManager(), s.ReverseProxyDomainManager(), s.AccessLogsManager(), s.ReverseProxyGRPCServer(), s.Config.ReverseProxy.TrustedHTTPProxies)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to create API handler: %v", err)
|
||||
}
|
||||
@@ -134,7 +134,7 @@ func (s *BaseServer) GRPCServer() *grpc.Server {
|
||||
if s.Config.HttpConfig.LetsEncryptDomain != "" {
|
||||
certManager, err := encryption.CreateCertManager(s.Config.Datadir, s.Config.HttpConfig.LetsEncryptDomain)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to create certificate manager: %v", err)
|
||||
log.Fatalf("failed to create certificate service: %v", err)
|
||||
}
|
||||
transportCredentials := credentials.NewTLS(certManager.TLSConfig())
|
||||
gRPCOpts = append(gRPCOpts, grpc.Creds(transportCredentials))
|
||||
@@ -152,10 +152,10 @@ func (s *BaseServer) GRPCServer() *grpc.Server {
|
||||
if err != nil {
|
||||
log.Fatalf("failed to create management server: %v", err)
|
||||
}
|
||||
reverseProxyMgr := s.ReverseProxyManager()
|
||||
srv.SetReverseProxyManager(reverseProxyMgr)
|
||||
if reverseProxyMgr != nil {
|
||||
reverseProxyMgr.StartExposeReaper(context.Background())
|
||||
serviceMgr := s.ServiceManager()
|
||||
srv.SetReverseProxyManager(serviceMgr)
|
||||
if serviceMgr != nil {
|
||||
serviceMgr.StartExposeReaper(context.Background())
|
||||
}
|
||||
mgmtProto.RegisterManagementServiceServer(gRPCAPIHandler, srv)
|
||||
|
||||
@@ -168,9 +168,10 @@ func (s *BaseServer) GRPCServer() *grpc.Server {
|
||||
|
||||
func (s *BaseServer) ReverseProxyGRPCServer() *nbgrpc.ProxyServiceServer {
|
||||
return Create(s, func() *nbgrpc.ProxyServiceServer {
|
||||
proxyService := nbgrpc.NewProxyServiceServer(s.AccessLogsManager(), s.ProxyTokenStore(), s.proxyOIDCConfig(), s.PeersManager(), s.UsersManager())
|
||||
proxyService := nbgrpc.NewProxyServiceServer(s.AccessLogsManager(), s.ProxyTokenStore(), s.PKCEVerifierStore(), s.proxyOIDCConfig(), s.PeersManager(), s.UsersManager(), s.ProxyManager())
|
||||
s.AfterInit(func(s *BaseServer) {
|
||||
proxyService.SetProxyManager(s.ReverseProxyManager())
|
||||
proxyService.SetServiceManager(s.ServiceManager())
|
||||
proxyService.SetProxyController(s.ServiceProxyController())
|
||||
})
|
||||
return proxyService
|
||||
})
|
||||
@@ -193,12 +194,25 @@ func (s *BaseServer) proxyOIDCConfig() nbgrpc.ProxyOIDCConfig {
|
||||
|
||||
func (s *BaseServer) ProxyTokenStore() *nbgrpc.OneTimeTokenStore {
|
||||
return Create(s, func() *nbgrpc.OneTimeTokenStore {
|
||||
tokenStore := nbgrpc.NewOneTimeTokenStore(1 * time.Minute)
|
||||
tokenStore, err := nbgrpc.NewOneTimeTokenStore(context.Background(), 5*time.Minute, 10*time.Minute, 100)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to create proxy token store: %v", err)
|
||||
}
|
||||
log.Info("One-time token store initialized for proxy authentication")
|
||||
return tokenStore
|
||||
})
|
||||
}
|
||||
|
||||
func (s *BaseServer) PKCEVerifierStore() *nbgrpc.PKCEVerifierStore {
|
||||
return Create(s, func() *nbgrpc.PKCEVerifierStore {
|
||||
pkceStore, err := nbgrpc.NewPKCEVerifierStore(context.Background(), 10*time.Minute, 10*time.Minute, 100)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to create PKCE verifier store: %v", err)
|
||||
}
|
||||
return pkceStore
|
||||
})
|
||||
}
|
||||
|
||||
func (s *BaseServer) AccessLogsManager() accesslogs.Manager {
|
||||
return Create(s, func() accesslogs.Manager {
|
||||
accessLogManager := accesslogsmanager.NewManager(s.Store(), s.PermissionsManager(), s.GeoLocationManager())
|
||||
|
||||
@@ -6,6 +6,8 @@ import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/netbirdio/management-integrations/integrations"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy"
|
||||
proxymanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy/manager"
|
||||
|
||||
"github.com/netbirdio/netbird/management/internals/controllers/network_map"
|
||||
nmapcontroller "github.com/netbirdio/netbird/management/internals/controllers/network_map/controller"
|
||||
@@ -106,6 +108,16 @@ func (s *BaseServer) NetworkMapController() network_map.Controller {
|
||||
})
|
||||
}
|
||||
|
||||
func (s *BaseServer) ServiceProxyController() proxy.Controller {
|
||||
return Create(s, func() proxy.Controller {
|
||||
controller, err := proxymanager.NewGRPCController(s.ReverseProxyGRPCServer(), s.Metrics().GetMeter())
|
||||
if err != nil {
|
||||
log.Fatalf("failed to create service proxy controller: %v", err)
|
||||
}
|
||||
return controller
|
||||
})
|
||||
}
|
||||
|
||||
func (s *BaseServer) AccountRequestBuffer() *server.AccountRequestBuffer {
|
||||
return Create(s, func() *server.AccountRequestBuffer {
|
||||
return server.NewAccountRequestBuffer(context.Background(), s.Store())
|
||||
|
||||
@@ -8,9 +8,11 @@ import (
|
||||
|
||||
"github.com/netbirdio/management-integrations/integrations"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/peers"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain/manager"
|
||||
nbreverseproxy "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/manager"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy"
|
||||
proxymanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy/manager"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service"
|
||||
nbreverseproxy "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service/manager"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/zones"
|
||||
zonesManager "github.com/netbirdio/netbird/management/internals/modules/zones/manager"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/zones/records"
|
||||
@@ -99,11 +101,11 @@ func (s *BaseServer) AccountManager() account.Manager {
|
||||
return Create(s, func() account.Manager {
|
||||
accountManager, err := server.BuildManager(context.Background(), s.Config, s.Store(), s.NetworkMapController(), s.JobManager(), s.IdpManager(), s.mgmtSingleAccModeDomain, s.EventStore(), s.GeoLocationManager(), s.userDeleteFromIDPEnabled, s.IntegratedValidator(), s.Metrics(), s.ProxyController(), s.SettingsManager(), s.PermissionsManager(), s.Config.DisableDefaultPolicy)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to create account manager: %v", err)
|
||||
log.Fatalf("failed to create account service: %v", err)
|
||||
}
|
||||
|
||||
s.AfterInit(func(s *BaseServer) {
|
||||
accountManager.SetServiceManager(s.ReverseProxyManager())
|
||||
accountManager.SetServiceManager(s.ServiceManager())
|
||||
})
|
||||
|
||||
return accountManager
|
||||
@@ -114,28 +116,28 @@ func (s *BaseServer) IdpManager() idp.Manager {
|
||||
return Create(s, func() idp.Manager {
|
||||
var idpManager idp.Manager
|
||||
var err error
|
||||
// Use embedded IdP manager if embedded Dex is configured and enabled.
|
||||
// Use embedded IdP service if embedded Dex is configured and enabled.
|
||||
// Legacy IdpManager won't be used anymore even if configured.
|
||||
if s.Config.EmbeddedIdP != nil && s.Config.EmbeddedIdP.Enabled {
|
||||
idpManager, err = idp.NewEmbeddedIdPManager(context.Background(), s.Config.EmbeddedIdP, s.Metrics())
|
||||
if err != nil {
|
||||
log.Fatalf("failed to create embedded IDP manager: %v", err)
|
||||
log.Fatalf("failed to create embedded IDP service: %v", err)
|
||||
}
|
||||
return idpManager
|
||||
}
|
||||
|
||||
// Fall back to external IdP manager
|
||||
// Fall back to external IdP service
|
||||
if s.Config.IdpManagerConfig != nil {
|
||||
idpManager, err = idp.NewManager(context.Background(), *s.Config.IdpManagerConfig, s.Metrics())
|
||||
if err != nil {
|
||||
log.Fatalf("failed to create IDP manager: %v", err)
|
||||
log.Fatalf("failed to create IDP service: %v", err)
|
||||
}
|
||||
}
|
||||
return idpManager
|
||||
})
|
||||
}
|
||||
|
||||
// OAuthConfigProvider is only relevant when we have an embedded IdP manager. Otherwise must be nil
|
||||
// OAuthConfigProvider is only relevant when we have an embedded IdP service. Otherwise must be nil
|
||||
func (s *BaseServer) OAuthConfigProvider() idp.OAuthConfigProvider {
|
||||
if s.Config.EmbeddedIdP == nil || !s.Config.EmbeddedIdP.Enabled {
|
||||
return nil
|
||||
@@ -162,7 +164,7 @@ func (s *BaseServer) GroupsManager() groups.Manager {
|
||||
|
||||
func (s *BaseServer) ResourcesManager() resources.Manager {
|
||||
return Create(s, func() resources.Manager {
|
||||
return resources.NewManager(s.Store(), s.PermissionsManager(), s.GroupsManager(), s.AccountManager(), s.ReverseProxyManager())
|
||||
return resources.NewManager(s.Store(), s.PermissionsManager(), s.GroupsManager(), s.AccountManager(), s.ServiceManager())
|
||||
})
|
||||
}
|
||||
|
||||
@@ -190,15 +192,25 @@ func (s *BaseServer) RecordsManager() records.Manager {
|
||||
})
|
||||
}
|
||||
|
||||
func (s *BaseServer) ReverseProxyManager() reverseproxy.Manager {
|
||||
return Create(s, func() reverseproxy.Manager {
|
||||
return nbreverseproxy.NewManager(s.Store(), s.AccountManager(), s.PermissionsManager(), s.SettingsManager(), s.ReverseProxyGRPCServer(), s.ReverseProxyDomainManager())
|
||||
func (s *BaseServer) ServiceManager() service.Manager {
|
||||
return Create(s, func() service.Manager {
|
||||
return nbreverseproxy.NewManager(s.Store(), s.AccountManager(), s.PermissionsManager(), s.ServiceProxyController(), s.ReverseProxyDomainManager())
|
||||
})
|
||||
}
|
||||
|
||||
func (s *BaseServer) ProxyManager() proxy.Manager {
|
||||
return Create(s, func() proxy.Manager {
|
||||
manager, err := proxymanager.NewManager(s.Store(), s.Metrics().GetMeter())
|
||||
if err != nil {
|
||||
log.Fatalf("failed to create proxy manager: %v", err)
|
||||
}
|
||||
return manager
|
||||
})
|
||||
}
|
||||
|
||||
func (s *BaseServer) ReverseProxyDomainManager() *manager.Manager {
|
||||
return Create(s, func() *manager.Manager {
|
||||
m := manager.NewManager(s.Store(), s.ReverseProxyGRPCServer(), s.PermissionsManager())
|
||||
m := manager.NewManager(s.Store(), s.ProxyManager(), s.PermissionsManager(), s.AccountManager())
|
||||
return &m
|
||||
})
|
||||
}
|
||||
|
||||
@@ -28,9 +28,13 @@ import (
|
||||
"github.com/netbirdio/netbird/version"
|
||||
)
|
||||
|
||||
// ManagementLegacyPort is the port that was used before by the Management gRPC server.
|
||||
// It is used for backward compatibility now.
|
||||
const ManagementLegacyPort = 33073
|
||||
const (
|
||||
// ManagementLegacyPort is the port that was used before by the Management gRPC server.
|
||||
// It is used for backward compatibility now.
|
||||
ManagementLegacyPort = 33073
|
||||
// DefaultSelfHostedDomain is the default domain used for self-hosted fresh installs.
|
||||
DefaultSelfHostedDomain = "netbird.selfhosted"
|
||||
)
|
||||
|
||||
type Server interface {
|
||||
Start(ctx context.Context) error
|
||||
@@ -58,6 +62,7 @@ type BaseServer struct {
|
||||
mgmtMetricsPort int
|
||||
mgmtPort int
|
||||
disableLegacyManagementPort bool
|
||||
autoResolveDomains bool
|
||||
|
||||
proxyAuthClose func()
|
||||
|
||||
@@ -81,6 +86,7 @@ type Config struct {
|
||||
DisableMetrics bool
|
||||
DisableGeoliteUpdate bool
|
||||
UserDeleteFromIDPEnabled bool
|
||||
AutoResolveDomains bool
|
||||
}
|
||||
|
||||
// NewServer initializes and configures a new Server instance
|
||||
@@ -96,6 +102,7 @@ func NewServer(cfg *Config) *BaseServer {
|
||||
mgmtPort: cfg.MgmtPort,
|
||||
disableLegacyManagementPort: cfg.DisableLegacyManagementPort,
|
||||
mgmtMetricsPort: cfg.MgmtMetricsPort,
|
||||
autoResolveDomains: cfg.AutoResolveDomains,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -109,6 +116,10 @@ func (s *BaseServer) Start(ctx context.Context) error {
|
||||
s.cancel = cancel
|
||||
s.errCh = make(chan error, 4)
|
||||
|
||||
if s.autoResolveDomains {
|
||||
s.resolveDomains(srvCtx)
|
||||
}
|
||||
|
||||
s.PeersManager()
|
||||
s.GeoLocationManager()
|
||||
|
||||
@@ -157,7 +168,7 @@ func (s *BaseServer) Start(ctx context.Context) error {
|
||||
|
||||
// Eagerly create the gRPC server so that all AfterInit hooks are registered
|
||||
// before we iterate them. Lazy creation after the loop would miss hooks
|
||||
// registered during GRPCServer() construction (e.g., SetProxyManager).
|
||||
// registered during GRPCServer() construction (e.g., SetServiceManager).
|
||||
s.GRPCServer()
|
||||
|
||||
for _, fn := range s.afterInit {
|
||||
@@ -237,7 +248,6 @@ func (s *BaseServer) Stop() error {
|
||||
_ = s.certManager.Listener().Close()
|
||||
}
|
||||
s.GRPCServer().Stop()
|
||||
s.ReverseProxyGRPCServer().Close()
|
||||
if s.proxyAuthClose != nil {
|
||||
s.proxyAuthClose()
|
||||
s.proxyAuthClose = nil
|
||||
@@ -381,6 +391,60 @@ func (s *BaseServer) serveGRPCWithHTTP(ctx context.Context, listener net.Listene
|
||||
}()
|
||||
}
|
||||
|
||||
// resolveDomains determines dnsDomain and mgmtSingleAccModeDomain based on store state.
|
||||
// Fresh installs use the default self-hosted domain, while existing installs reuse the
|
||||
// persisted account domain to keep addressing stable across config changes.
|
||||
func (s *BaseServer) resolveDomains(ctx context.Context) {
|
||||
st := s.Store()
|
||||
|
||||
setDefault := func(logMsg string, args ...any) {
|
||||
if logMsg != "" {
|
||||
log.WithContext(ctx).Warnf(logMsg, args...)
|
||||
}
|
||||
s.dnsDomain = DefaultSelfHostedDomain
|
||||
s.mgmtSingleAccModeDomain = DefaultSelfHostedDomain
|
||||
}
|
||||
|
||||
accountsCount, err := st.GetAccountsCounter(ctx)
|
||||
if err != nil {
|
||||
setDefault("resolve domains: failed to read accounts counter: %v; using default domain %q", err, DefaultSelfHostedDomain)
|
||||
return
|
||||
}
|
||||
|
||||
if accountsCount == 0 {
|
||||
s.dnsDomain = DefaultSelfHostedDomain
|
||||
s.mgmtSingleAccModeDomain = DefaultSelfHostedDomain
|
||||
log.WithContext(ctx).Infof("resolve domains: fresh install detected, using default domain %q", DefaultSelfHostedDomain)
|
||||
return
|
||||
}
|
||||
|
||||
accountID, err := st.GetAnyAccountID(ctx)
|
||||
if err != nil {
|
||||
setDefault("resolve domains: failed to get existing account ID: %v; using default domain %q", err, DefaultSelfHostedDomain)
|
||||
return
|
||||
}
|
||||
|
||||
if accountID == "" {
|
||||
setDefault("resolve domains: empty account ID returned for existing accounts; using default domain %q", DefaultSelfHostedDomain)
|
||||
return
|
||||
}
|
||||
|
||||
domain, _, err := st.GetAccountDomainAndCategory(ctx, store.LockingStrengthNone, accountID)
|
||||
if err != nil {
|
||||
setDefault("resolve domains: failed to get account domain for account %q: %v; using default domain %q", accountID, err, DefaultSelfHostedDomain)
|
||||
return
|
||||
}
|
||||
|
||||
if domain == "" {
|
||||
setDefault("resolve domains: account %q has empty domain; using default domain %q", accountID, DefaultSelfHostedDomain)
|
||||
return
|
||||
}
|
||||
|
||||
s.dnsDomain = domain
|
||||
s.mgmtSingleAccModeDomain = domain
|
||||
log.WithContext(ctx).Infof("resolve domains: using persisted account domain %q", domain)
|
||||
}
|
||||
|
||||
func getInstallationID(ctx context.Context, store store.Store) (string, error) {
|
||||
installationID := store.GetInstallationID()
|
||||
if installationID != "" {
|
||||
|
||||
63
management/internals/server/server_resolve_domains_test.go
Normal file
63
management/internals/server/server_resolve_domains_test.go
Normal file
@@ -0,0 +1,63 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/golang/mock/gomock"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
nbconfig "github.com/netbirdio/netbird/management/internals/server/config"
|
||||
"github.com/netbirdio/netbird/management/server/store"
|
||||
)
|
||||
|
||||
func TestResolveDomains_FreshInstallUsesDefault(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
t.Cleanup(ctrl.Finish)
|
||||
|
||||
mockStore := store.NewMockStore(ctrl)
|
||||
mockStore.EXPECT().GetAccountsCounter(gomock.Any()).Return(int64(0), nil)
|
||||
|
||||
srv := NewServer(&Config{NbConfig: &nbconfig.Config{}})
|
||||
Inject[store.Store](srv, mockStore)
|
||||
|
||||
srv.resolveDomains(context.Background())
|
||||
|
||||
require.Equal(t, DefaultSelfHostedDomain, srv.dnsDomain)
|
||||
require.Equal(t, DefaultSelfHostedDomain, srv.mgmtSingleAccModeDomain)
|
||||
}
|
||||
|
||||
func TestResolveDomains_ExistingInstallUsesPersistedDomain(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
t.Cleanup(ctrl.Finish)
|
||||
|
||||
mockStore := store.NewMockStore(ctrl)
|
||||
mockStore.EXPECT().GetAccountsCounter(gomock.Any()).Return(int64(1), nil)
|
||||
mockStore.EXPECT().GetAnyAccountID(gomock.Any()).Return("acc-1", nil)
|
||||
mockStore.EXPECT().GetAccountDomainAndCategory(gomock.Any(), store.LockingStrengthNone, "acc-1").Return("vpn.mycompany.com", "", nil)
|
||||
|
||||
srv := NewServer(&Config{NbConfig: &nbconfig.Config{}})
|
||||
Inject[store.Store](srv, mockStore)
|
||||
|
||||
srv.resolveDomains(context.Background())
|
||||
|
||||
require.Equal(t, "vpn.mycompany.com", srv.dnsDomain)
|
||||
require.Equal(t, "vpn.mycompany.com", srv.mgmtSingleAccModeDomain)
|
||||
}
|
||||
|
||||
func TestResolveDomains_StoreErrorFallsBackToDefault(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
t.Cleanup(ctrl.Finish)
|
||||
|
||||
mockStore := store.NewMockStore(ctrl)
|
||||
mockStore.EXPECT().GetAccountsCounter(gomock.Any()).Return(int64(0), errors.New("db failed"))
|
||||
|
||||
srv := NewServer(&Config{NbConfig: &nbconfig.Config{}})
|
||||
Inject[store.Store](srv, mockStore)
|
||||
|
||||
srv.resolveDomains(context.Background())
|
||||
|
||||
require.Equal(t, DefaultSelfHostedDomain, srv.dnsDomain)
|
||||
require.Equal(t, DefaultSelfHostedDomain, srv.mgmtSingleAccModeDomain)
|
||||
}
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
"google.golang.org/grpc/status"
|
||||
|
||||
"github.com/netbirdio/netbird/encryption"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy"
|
||||
rpservice "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service"
|
||||
nbContext "github.com/netbirdio/netbird/management/server/context"
|
||||
nbpeer "github.com/netbirdio/netbird/management/server/peer"
|
||||
"github.com/netbirdio/netbird/management/server/store"
|
||||
@@ -39,7 +39,7 @@ func (s *Server) CreateExpose(ctx context.Context, req *proto.EncryptedMessage)
|
||||
return nil, status.Errorf(codes.Internal, "reverse proxy manager not available")
|
||||
}
|
||||
|
||||
created, err := reverseProxyMgr.CreateServiceFromPeer(ctx, accountID, peer.ID, &reverseproxy.ExposeServiceRequest{
|
||||
created, err := reverseProxyMgr.CreateServiceFromPeer(ctx, accountID, peer.ID, &rpservice.ExposeServiceRequest{
|
||||
NamePrefix: exposeReq.NamePrefix,
|
||||
Port: int(exposeReq.Port),
|
||||
Protocol: exposeProtocolToString(exposeReq.Protocol),
|
||||
@@ -167,14 +167,14 @@ func (s *Server) authenticateExposePeer(ctx context.Context, peerKey wgtypes.Key
|
||||
return accountID, peer, nil
|
||||
}
|
||||
|
||||
func (s *Server) getReverseProxyManager() reverseproxy.Manager {
|
||||
func (s *Server) getReverseProxyManager() rpservice.Manager {
|
||||
s.reverseProxyMu.RLock()
|
||||
defer s.reverseProxyMu.RUnlock()
|
||||
return s.reverseProxyManager
|
||||
}
|
||||
|
||||
// SetReverseProxyManager sets the reverse proxy manager on the server.
|
||||
func (s *Server) SetReverseProxyManager(mgr reverseproxy.Manager) {
|
||||
func (s *Server) SetReverseProxyManager(mgr rpservice.Manager) {
|
||||
s.reverseProxyMu.Lock()
|
||||
defer s.reverseProxyMu.Unlock()
|
||||
s.reverseProxyManager = mgr
|
||||
|
||||
@@ -1,28 +1,23 @@
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"crypto/subtle"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/eko/gocache/lib/v4/cache"
|
||||
"github.com/eko/gocache/lib/v4/store"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
nbcache "github.com/netbirdio/netbird/management/server/cache"
|
||||
)
|
||||
|
||||
// OneTimeTokenStore manages short-lived, single-use authentication tokens
|
||||
// for proxy-to-management RPC authentication. Tokens are generated when
|
||||
// a service is created and must be used exactly once by the proxy
|
||||
// to authenticate a subsequent RPC call.
|
||||
type OneTimeTokenStore struct {
|
||||
tokens map[string]*tokenMetadata
|
||||
mu sync.RWMutex
|
||||
cleanup *time.Ticker
|
||||
cleanupDone chan struct{}
|
||||
}
|
||||
|
||||
// tokenMetadata stores information about a one-time token
|
||||
type tokenMetadata struct {
|
||||
ServiceID string
|
||||
AccountID string
|
||||
@@ -30,20 +25,24 @@ type tokenMetadata struct {
|
||||
CreatedAt time.Time
|
||||
}
|
||||
|
||||
// NewOneTimeTokenStore creates a new token store with automatic cleanup
|
||||
// of expired tokens. The cleanupInterval determines how often expired
|
||||
// tokens are removed from memory.
|
||||
func NewOneTimeTokenStore(cleanupInterval time.Duration) *OneTimeTokenStore {
|
||||
store := &OneTimeTokenStore{
|
||||
tokens: make(map[string]*tokenMetadata),
|
||||
cleanup: time.NewTicker(cleanupInterval),
|
||||
cleanupDone: make(chan struct{}),
|
||||
// OneTimeTokenStore manages single-use authentication tokens for proxy-to-management RPC.
|
||||
// Supports both in-memory and Redis storage via NB_IDP_CACHE_REDIS_ADDRESS env var.
|
||||
type OneTimeTokenStore struct {
|
||||
cache *cache.Cache[string]
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
// NewOneTimeTokenStore creates a token store with automatic backend selection
|
||||
func NewOneTimeTokenStore(ctx context.Context, maxTimeout, cleanupInterval time.Duration, maxConn int) (*OneTimeTokenStore, error) {
|
||||
cacheStore, err := nbcache.NewStore(ctx, maxTimeout, cleanupInterval, maxConn)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create cache store: %w", err)
|
||||
}
|
||||
|
||||
// Start background cleanup goroutine
|
||||
go store.cleanupExpired()
|
||||
|
||||
return store
|
||||
return &OneTimeTokenStore{
|
||||
cache: cache.New[string](cacheStore),
|
||||
ctx: ctx,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// GenerateToken creates a new cryptographically secure one-time token
|
||||
@@ -52,25 +51,30 @@ func NewOneTimeTokenStore(cleanupInterval time.Duration) *OneTimeTokenStore {
|
||||
//
|
||||
// Returns the generated token string or an error if random generation fails.
|
||||
func (s *OneTimeTokenStore) GenerateToken(accountID, serviceID string, ttl time.Duration) (string, error) {
|
||||
// Generate 32 bytes (256 bits) of cryptographically secure random data
|
||||
randomBytes := make([]byte, 32)
|
||||
if _, err := rand.Read(randomBytes); err != nil {
|
||||
return "", fmt.Errorf("failed to generate random token: %w", err)
|
||||
}
|
||||
|
||||
// Encode as URL-safe base64 for easy transmission in gRPC
|
||||
token := base64.URLEncoding.EncodeToString(randomBytes)
|
||||
hashedToken := hashToken(token)
|
||||
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
s.tokens[token] = &tokenMetadata{
|
||||
metadata := &tokenMetadata{
|
||||
ServiceID: serviceID,
|
||||
AccountID: accountID,
|
||||
ExpiresAt: time.Now().Add(ttl),
|
||||
CreatedAt: time.Now(),
|
||||
}
|
||||
|
||||
metadataJSON, err := json.Marshal(metadata)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to serialize token metadata: %w", err)
|
||||
}
|
||||
|
||||
if err := s.cache.Set(s.ctx, hashedToken, string(metadataJSON), store.WithExpiration(ttl)); err != nil {
|
||||
return "", fmt.Errorf("failed to store token: %w", err)
|
||||
}
|
||||
|
||||
log.Debugf("Generated one-time token for proxy %s in account %s (expires in %s)",
|
||||
serviceID, accountID, ttl)
|
||||
|
||||
@@ -88,80 +92,45 @@ func (s *OneTimeTokenStore) GenerateToken(accountID, serviceID string, ttl time.
|
||||
// - Account ID doesn't match
|
||||
// - Reverse proxy ID doesn't match
|
||||
func (s *OneTimeTokenStore) ValidateAndConsume(token, accountID, serviceID string) error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
hashedToken := hashToken(token)
|
||||
|
||||
metadata, exists := s.tokens[token]
|
||||
if !exists {
|
||||
log.Warnf("Token validation failed: token not found (proxy: %s, account: %s)",
|
||||
serviceID, accountID)
|
||||
metadataJSON, err := s.cache.Get(s.ctx, hashedToken)
|
||||
if err != nil {
|
||||
log.Warnf("Token validation failed: token not found (proxy: %s, account: %s)", serviceID, accountID)
|
||||
return fmt.Errorf("invalid token")
|
||||
}
|
||||
|
||||
// Check expiration
|
||||
metadata := &tokenMetadata{}
|
||||
if err := json.Unmarshal([]byte(metadataJSON), metadata); err != nil {
|
||||
log.Warnf("Token validation failed: failed to unmarshal metadata (proxy: %s, account: %s): %v", serviceID, accountID, err)
|
||||
return fmt.Errorf("invalid token metadata")
|
||||
}
|
||||
|
||||
if time.Now().After(metadata.ExpiresAt) {
|
||||
delete(s.tokens, token)
|
||||
log.Warnf("Token validation failed: token expired (proxy: %s, account: %s)",
|
||||
serviceID, accountID)
|
||||
log.Warnf("Token validation failed: token expired (proxy: %s, account: %s)", serviceID, accountID)
|
||||
return fmt.Errorf("token expired")
|
||||
}
|
||||
|
||||
// Validate account ID using constant-time comparison (prevents timing attacks)
|
||||
if subtle.ConstantTimeCompare([]byte(metadata.AccountID), []byte(accountID)) != 1 {
|
||||
log.Warnf("Token validation failed: account ID mismatch (expected: %s, got: %s)",
|
||||
metadata.AccountID, accountID)
|
||||
log.Warnf("Token validation failed: account ID mismatch (expected: %s, got: %s)", metadata.AccountID, accountID)
|
||||
return fmt.Errorf("account ID mismatch")
|
||||
}
|
||||
|
||||
// Validate service ID using constant-time comparison
|
||||
if subtle.ConstantTimeCompare([]byte(metadata.ServiceID), []byte(serviceID)) != 1 {
|
||||
log.Warnf("Token validation failed: service ID mismatch (expected: %s, got: %s)",
|
||||
metadata.ServiceID, serviceID)
|
||||
log.Warnf("Token validation failed: service ID mismatch (expected: %s, got: %s)", metadata.ServiceID, serviceID)
|
||||
return fmt.Errorf("service ID mismatch")
|
||||
}
|
||||
|
||||
// Delete token immediately to enforce single-use
|
||||
delete(s.tokens, token)
|
||||
if err := s.cache.Delete(s.ctx, hashedToken); err != nil {
|
||||
log.Warnf("Token deletion warning (proxy: %s, account: %s): %v", serviceID, accountID, err)
|
||||
}
|
||||
|
||||
log.Infof("Token validated and consumed for proxy %s in account %s",
|
||||
serviceID, accountID)
|
||||
log.Infof("Token validated and consumed for proxy %s in account %s", serviceID, accountID)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// cleanupExpired removes expired tokens in the background to prevent memory leaks
|
||||
func (s *OneTimeTokenStore) cleanupExpired() {
|
||||
for {
|
||||
select {
|
||||
case <-s.cleanup.C:
|
||||
s.mu.Lock()
|
||||
now := time.Now()
|
||||
removed := 0
|
||||
for token, metadata := range s.tokens {
|
||||
if now.After(metadata.ExpiresAt) {
|
||||
delete(s.tokens, token)
|
||||
removed++
|
||||
}
|
||||
}
|
||||
if removed > 0 {
|
||||
log.Debugf("Cleaned up %d expired one-time tokens", removed)
|
||||
}
|
||||
s.mu.Unlock()
|
||||
case <-s.cleanupDone:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Close stops the cleanup goroutine and releases resources
|
||||
func (s *OneTimeTokenStore) Close() {
|
||||
s.cleanup.Stop()
|
||||
close(s.cleanupDone)
|
||||
}
|
||||
|
||||
// GetTokenCount returns the current number of tokens in the store (for debugging/metrics)
|
||||
func (s *OneTimeTokenStore) GetTokenCount() int {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return len(s.tokens)
|
||||
func hashToken(token string) string {
|
||||
hash := sha256.Sum256([]byte(token))
|
||||
return hex.EncodeToString(hash[:])
|
||||
}
|
||||
|
||||
61
management/internals/shared/grpc/pkce_verifier.go
Normal file
61
management/internals/shared/grpc/pkce_verifier.go
Normal file
@@ -0,0 +1,61 @@
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/eko/gocache/lib/v4/cache"
|
||||
"github.com/eko/gocache/lib/v4/store"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
nbcache "github.com/netbirdio/netbird/management/server/cache"
|
||||
)
|
||||
|
||||
// PKCEVerifierStore manages PKCE verifiers for OAuth flows.
|
||||
// Supports both in-memory and Redis storage via NB_IDP_CACHE_REDIS_ADDRESS env var.
|
||||
type PKCEVerifierStore struct {
|
||||
cache *cache.Cache[string]
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
// NewPKCEVerifierStore creates a PKCE verifier store with automatic backend selection
|
||||
func NewPKCEVerifierStore(ctx context.Context, maxTimeout, cleanupInterval time.Duration, maxConn int) (*PKCEVerifierStore, error) {
|
||||
cacheStore, err := nbcache.NewStore(ctx, maxTimeout, cleanupInterval, maxConn)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create cache store: %w", err)
|
||||
}
|
||||
|
||||
return &PKCEVerifierStore{
|
||||
cache: cache.New[string](cacheStore),
|
||||
ctx: ctx,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Store saves a PKCE verifier associated with an OAuth state parameter.
|
||||
// The verifier is stored with the specified TTL and will be automatically deleted after expiration.
|
||||
func (s *PKCEVerifierStore) Store(state, verifier string, ttl time.Duration) error {
|
||||
if err := s.cache.Set(s.ctx, state, verifier, store.WithExpiration(ttl)); err != nil {
|
||||
return fmt.Errorf("failed to store PKCE verifier: %w", err)
|
||||
}
|
||||
|
||||
log.Debugf("Stored PKCE verifier for state (expires in %s)", ttl)
|
||||
return nil
|
||||
}
|
||||
|
||||
// LoadAndDelete retrieves and removes a PKCE verifier for the given state.
|
||||
// Returns the verifier and true if found, or empty string and false if not found.
|
||||
// This enforces single-use semantics for PKCE verifiers.
|
||||
func (s *PKCEVerifierStore) LoadAndDelete(state string) (string, bool) {
|
||||
verifier, err := s.cache.Get(s.ctx, state)
|
||||
if err != nil {
|
||||
log.Debugf("PKCE verifier not found for state")
|
||||
return "", false
|
||||
}
|
||||
|
||||
if err := s.cache.Delete(s.ctx, state); err != nil {
|
||||
log.Warnf("Failed to delete PKCE verifier for state: %v", err)
|
||||
}
|
||||
|
||||
return verifier, true
|
||||
}
|
||||
@@ -18,14 +18,14 @@ import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
"golang.org/x/oauth2"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/peer"
|
||||
"google.golang.org/grpc/status"
|
||||
|
||||
"github.com/netbirdio/netbird/shared/management/domain"
|
||||
|
||||
"github.com/netbirdio/netbird/management/internals/modules/peers"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy"
|
||||
rpservice "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/sessionkey"
|
||||
"github.com/netbirdio/netbird/management/server/types"
|
||||
"github.com/netbirdio/netbird/management/server/users"
|
||||
@@ -58,14 +58,17 @@ type ProxyServiceServer struct {
|
||||
// Map of connected proxies: proxy_id -> proxy connection
|
||||
connectedProxies sync.Map
|
||||
|
||||
// Map of cluster address -> set of proxy IDs
|
||||
clusterProxies sync.Map
|
||||
|
||||
// Manager for access logs
|
||||
accessLogManager accesslogs.Manager
|
||||
|
||||
// Manager for reverse proxy operations
|
||||
reverseProxyManager reverseproxy.Manager
|
||||
serviceManager rpservice.Manager
|
||||
|
||||
// ProxyController for service updates and cluster management
|
||||
proxyController proxy.Controller
|
||||
|
||||
// Manager for proxy connections
|
||||
proxyManager proxy.Manager
|
||||
|
||||
// Manager for peers
|
||||
peersManager peers.Manager
|
||||
@@ -79,20 +82,12 @@ type ProxyServiceServer struct {
|
||||
// OIDC configuration for proxy authentication
|
||||
oidcConfig ProxyOIDCConfig
|
||||
|
||||
// TODO: use database to store these instead?
|
||||
// pkceVerifiers stores PKCE code verifiers keyed by OAuth state.
|
||||
// Entries expire after pkceVerifierTTL to prevent unbounded growth.
|
||||
pkceVerifiers sync.Map
|
||||
pkceCleanupCancel context.CancelFunc
|
||||
// Store for PKCE verifiers
|
||||
pkceVerifierStore *PKCEVerifierStore
|
||||
}
|
||||
|
||||
const pkceVerifierTTL = 10 * time.Minute
|
||||
|
||||
type pkceEntry struct {
|
||||
verifier string
|
||||
createdAt time.Time
|
||||
}
|
||||
|
||||
// proxyConnection represents a connected proxy
|
||||
type proxyConnection struct {
|
||||
proxyID string
|
||||
@@ -104,58 +99,50 @@ type proxyConnection struct {
|
||||
}
|
||||
|
||||
// NewProxyServiceServer creates a new proxy service server.
|
||||
func NewProxyServiceServer(accessLogMgr accesslogs.Manager, tokenStore *OneTimeTokenStore, oidcConfig ProxyOIDCConfig, peersManager peers.Manager, usersManager users.Manager) *ProxyServiceServer {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
func NewProxyServiceServer(accessLogMgr accesslogs.Manager, tokenStore *OneTimeTokenStore, pkceStore *PKCEVerifierStore, oidcConfig ProxyOIDCConfig, peersManager peers.Manager, usersManager users.Manager, proxyMgr proxy.Manager) *ProxyServiceServer {
|
||||
ctx := context.Background()
|
||||
s := &ProxyServiceServer{
|
||||
accessLogManager: accessLogMgr,
|
||||
oidcConfig: oidcConfig,
|
||||
tokenStore: tokenStore,
|
||||
pkceVerifierStore: pkceStore,
|
||||
peersManager: peersManager,
|
||||
usersManager: usersManager,
|
||||
pkceCleanupCancel: cancel,
|
||||
proxyManager: proxyMgr,
|
||||
}
|
||||
go s.cleanupPKCEVerifiers(ctx)
|
||||
go s.cleanupStaleProxies(ctx)
|
||||
return s
|
||||
}
|
||||
|
||||
// cleanupPKCEVerifiers periodically removes expired PKCE verifiers.
|
||||
func (s *ProxyServiceServer) cleanupPKCEVerifiers(ctx context.Context) {
|
||||
ticker := time.NewTicker(pkceVerifierTTL)
|
||||
// cleanupStaleProxies periodically removes proxies that haven't sent heartbeat in 10 minutes
|
||||
func (s *ProxyServiceServer) cleanupStaleProxies(ctx context.Context) {
|
||||
ticker := time.NewTicker(5 * time.Minute)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
now := time.Now()
|
||||
s.pkceVerifiers.Range(func(key, value any) bool {
|
||||
if entry, ok := value.(pkceEntry); ok && now.Sub(entry.createdAt) > pkceVerifierTTL {
|
||||
s.pkceVerifiers.Delete(key)
|
||||
}
|
||||
return true
|
||||
})
|
||||
if err := s.proxyManager.CleanupStale(ctx, 10*time.Minute); err != nil {
|
||||
log.WithContext(ctx).Debugf("Failed to cleanup stale proxies: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Close stops background goroutines.
|
||||
func (s *ProxyServiceServer) Close() {
|
||||
s.pkceCleanupCancel()
|
||||
func (s *ProxyServiceServer) SetServiceManager(manager rpservice.Manager) {
|
||||
s.serviceManager = manager
|
||||
}
|
||||
|
||||
func (s *ProxyServiceServer) SetProxyManager(manager reverseproxy.Manager) {
|
||||
s.reverseProxyManager = manager
|
||||
func (s *ProxyServiceServer) SetProxyController(proxyController proxy.Controller) {
|
||||
s.proxyController = proxyController
|
||||
}
|
||||
|
||||
// GetMappingUpdate handles the control stream with proxy clients
|
||||
func (s *ProxyServiceServer) GetMappingUpdate(req *proto.GetMappingUpdateRequest, stream proto.ProxyService_GetMappingUpdateServer) error {
|
||||
ctx := stream.Context()
|
||||
|
||||
peerInfo := ""
|
||||
if p, ok := peer.FromContext(ctx); ok {
|
||||
peerInfo = p.Addr.String()
|
||||
}
|
||||
|
||||
peerInfo := PeerIPFromContext(ctx)
|
||||
log.Infof("New proxy connection from %s", peerInfo)
|
||||
|
||||
proxyID := req.GetProxyId()
|
||||
@@ -179,7 +166,15 @@ func (s *ProxyServiceServer) GetMappingUpdate(req *proto.GetMappingUpdateRequest
|
||||
}
|
||||
|
||||
s.connectedProxies.Store(proxyID, conn)
|
||||
s.addToCluster(conn.address, proxyID)
|
||||
if err := s.proxyController.RegisterProxyToCluster(ctx, conn.address, proxyID); err != nil {
|
||||
log.WithContext(ctx).Warnf("Failed to register proxy %s in cluster: %v", proxyID, err)
|
||||
}
|
||||
|
||||
// Register proxy in database
|
||||
if err := s.proxyManager.Connect(ctx, proxyID, proxyAddress, peerInfo); err != nil {
|
||||
log.WithContext(ctx).Warnf("Failed to register proxy %s in database: %v", proxyID, err)
|
||||
}
|
||||
|
||||
log.WithFields(log.Fields{
|
||||
"proxy_id": proxyID,
|
||||
"address": proxyAddress,
|
||||
@@ -187,8 +182,15 @@ func (s *ProxyServiceServer) GetMappingUpdate(req *proto.GetMappingUpdateRequest
|
||||
"total_proxies": len(s.GetConnectedProxies()),
|
||||
}).Info("Proxy registered in cluster")
|
||||
defer func() {
|
||||
if err := s.proxyManager.Disconnect(context.Background(), proxyID); err != nil {
|
||||
log.Warnf("Failed to mark proxy %s as disconnected: %v", proxyID, err)
|
||||
}
|
||||
|
||||
s.connectedProxies.Delete(proxyID)
|
||||
s.removeFromCluster(conn.address, proxyID)
|
||||
if err := s.proxyController.UnregisterProxyFromCluster(context.Background(), conn.address, proxyID); err != nil {
|
||||
log.Warnf("Failed to unregister proxy %s from cluster: %v", proxyID, err)
|
||||
}
|
||||
|
||||
cancel()
|
||||
log.Infof("Proxy %s disconnected", proxyID)
|
||||
}()
|
||||
@@ -200,6 +202,9 @@ func (s *ProxyServiceServer) GetMappingUpdate(req *proto.GetMappingUpdateRequest
|
||||
errChan := make(chan error, 2)
|
||||
go s.sender(conn, errChan)
|
||||
|
||||
// Start heartbeat goroutine
|
||||
go s.heartbeat(connCtx, proxyID)
|
||||
|
||||
select {
|
||||
case err := <-errChan:
|
||||
return fmt.Errorf("send update to proxy %s: %w", proxyID, err)
|
||||
@@ -208,10 +213,27 @@ func (s *ProxyServiceServer) GetMappingUpdate(req *proto.GetMappingUpdateRequest
|
||||
}
|
||||
}
|
||||
|
||||
// heartbeat updates the proxy's last_seen timestamp every minute
|
||||
func (s *ProxyServiceServer) heartbeat(ctx context.Context, proxyID string) {
|
||||
ticker := time.NewTicker(1 * time.Minute)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
if err := s.proxyManager.Heartbeat(ctx, proxyID); err != nil {
|
||||
log.WithContext(ctx).Debugf("Failed to update proxy %s heartbeat: %v", proxyID, err)
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// sendSnapshot sends the initial snapshot of services to the connecting proxy.
|
||||
// Only services matching the proxy's cluster address are sent.
|
||||
func (s *ProxyServiceServer) sendSnapshot(ctx context.Context, conn *proxyConnection) error {
|
||||
services, err := s.reverseProxyManager.GetGlobalServices(ctx)
|
||||
services, err := s.serviceManager.GetGlobalServices(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get services from store: %w", err)
|
||||
}
|
||||
@@ -220,7 +242,7 @@ func (s *ProxyServiceServer) sendSnapshot(ctx context.Context, conn *proxyConnec
|
||||
return fmt.Errorf("proxy address is invalid")
|
||||
}
|
||||
|
||||
var filtered []*reverseproxy.Service
|
||||
var filtered []*rpservice.Service
|
||||
for _, service := range services {
|
||||
if !service.Enabled {
|
||||
continue
|
||||
@@ -255,7 +277,7 @@ func (s *ProxyServiceServer) sendSnapshot(ctx context.Context, conn *proxyConnec
|
||||
if err := conn.stream.Send(&proto.GetMappingUpdateResponse{
|
||||
Mapping: []*proto.ProxyMapping{
|
||||
service.ToProtoMapping(
|
||||
reverseproxy.Create, // Initial snapshot, all records are "new" for the proxy.
|
||||
rpservice.Create, // Initial snapshot, all records are "new" for the proxy.
|
||||
token,
|
||||
s.GetOIDCValidationConfig(),
|
||||
),
|
||||
@@ -389,61 +411,47 @@ func (s *ProxyServiceServer) GetConnectedProxyURLs() []string {
|
||||
return urls
|
||||
}
|
||||
|
||||
// addToCluster registers a proxy in a cluster.
|
||||
func (s *ProxyServiceServer) addToCluster(clusterAddr, proxyID string) {
|
||||
if clusterAddr == "" {
|
||||
return
|
||||
}
|
||||
proxySet, _ := s.clusterProxies.LoadOrStore(clusterAddr, &sync.Map{})
|
||||
proxySet.(*sync.Map).Store(proxyID, struct{}{})
|
||||
log.Debugf("Added proxy %s to cluster %s", proxyID, clusterAddr)
|
||||
}
|
||||
|
||||
// removeFromCluster removes a proxy from a cluster.
|
||||
func (s *ProxyServiceServer) removeFromCluster(clusterAddr, proxyID string) {
|
||||
if clusterAddr == "" {
|
||||
return
|
||||
}
|
||||
if proxySet, ok := s.clusterProxies.Load(clusterAddr); ok {
|
||||
proxySet.(*sync.Map).Delete(proxyID)
|
||||
log.Debugf("Removed proxy %s from cluster %s", proxyID, clusterAddr)
|
||||
}
|
||||
}
|
||||
|
||||
// SendServiceUpdateToCluster sends a service update to all proxy servers in a specific cluster.
|
||||
// If clusterAddr is empty, broadcasts to all connected proxy servers (backward compatibility).
|
||||
// For create/update operations a unique one-time auth token is generated per
|
||||
// proxy so that every replica can independently authenticate with management.
|
||||
func (s *ProxyServiceServer) SendServiceUpdateToCluster(update *proto.GetMappingUpdateResponse, clusterAddr string) {
|
||||
func (s *ProxyServiceServer) SendServiceUpdateToCluster(ctx context.Context, update *proto.ProxyMapping, clusterAddr string) {
|
||||
updateResponse := &proto.GetMappingUpdateResponse{
|
||||
Mapping: []*proto.ProxyMapping{update},
|
||||
}
|
||||
|
||||
if clusterAddr == "" {
|
||||
s.SendServiceUpdate(update)
|
||||
s.SendServiceUpdate(updateResponse)
|
||||
return
|
||||
}
|
||||
|
||||
proxySet, ok := s.clusterProxies.Load(clusterAddr)
|
||||
if !ok {
|
||||
log.Debugf("No proxies connected for cluster %s", clusterAddr)
|
||||
if s.proxyController == nil {
|
||||
log.WithContext(ctx).Debugf("ProxyController not set, cannot send to cluster %s", clusterAddr)
|
||||
return
|
||||
}
|
||||
|
||||
proxyIDs := s.proxyController.GetProxiesForCluster(clusterAddr)
|
||||
if len(proxyIDs) == 0 {
|
||||
log.WithContext(ctx).Debugf("No proxies connected for cluster %s", clusterAddr)
|
||||
return
|
||||
}
|
||||
|
||||
log.Debugf("Sending service update to cluster %s", clusterAddr)
|
||||
proxySet.(*sync.Map).Range(func(key, _ interface{}) bool {
|
||||
proxyID := key.(string)
|
||||
for _, proxyID := range proxyIDs {
|
||||
if connVal, ok := s.connectedProxies.Load(proxyID); ok {
|
||||
conn := connVal.(*proxyConnection)
|
||||
msg := s.perProxyMessage(update, proxyID)
|
||||
msg := s.perProxyMessage(updateResponse, proxyID)
|
||||
if msg == nil {
|
||||
return true
|
||||
continue
|
||||
}
|
||||
select {
|
||||
case conn.sendChan <- msg:
|
||||
log.Debugf("Sent service update to proxy %s in cluster %s", proxyID, clusterAddr)
|
||||
log.WithContext(ctx).Debugf("Sent service update with id %s to proxy %s in cluster %s", update.Id, proxyID, clusterAddr)
|
||||
default:
|
||||
log.Warnf("Failed to send service update to proxy %s in cluster %s (channel full)", proxyID, clusterAddr)
|
||||
log.WithContext(ctx).Warnf("Failed to send service update to proxy %s in cluster %s (channel full)", proxyID, clusterAddr)
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// perProxyMessage returns a copy of update with a fresh one-time token for
|
||||
@@ -490,35 +498,8 @@ func shallowCloneMapping(m *proto.ProxyMapping) *proto.ProxyMapping {
|
||||
}
|
||||
}
|
||||
|
||||
// GetAvailableClusters returns information about all connected proxy clusters.
|
||||
func (s *ProxyServiceServer) GetAvailableClusters() []ClusterInfo {
|
||||
clusterCounts := make(map[string]int)
|
||||
s.clusterProxies.Range(func(key, value interface{}) bool {
|
||||
clusterAddr := key.(string)
|
||||
proxySet := value.(*sync.Map)
|
||||
count := 0
|
||||
proxySet.Range(func(_, _ interface{}) bool {
|
||||
count++
|
||||
return true
|
||||
})
|
||||
if count > 0 {
|
||||
clusterCounts[clusterAddr] = count
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
clusters := make([]ClusterInfo, 0, len(clusterCounts))
|
||||
for addr, count := range clusterCounts {
|
||||
clusters = append(clusters, ClusterInfo{
|
||||
Address: addr,
|
||||
ConnectedProxies: count,
|
||||
})
|
||||
}
|
||||
return clusters
|
||||
}
|
||||
|
||||
func (s *ProxyServiceServer) Authenticate(ctx context.Context, req *proto.AuthenticateRequest) (*proto.AuthenticateResponse, error) {
|
||||
service, err := s.reverseProxyManager.GetServiceByID(ctx, req.GetAccountId(), req.GetId())
|
||||
service, err := s.serviceManager.GetServiceByID(ctx, req.GetAccountId(), req.GetId())
|
||||
if err != nil {
|
||||
log.WithContext(ctx).Debugf("failed to get service from store: %v", err)
|
||||
return nil, status.Errorf(codes.FailedPrecondition, "get service from store: %v", err)
|
||||
@@ -537,7 +518,7 @@ func (s *ProxyServiceServer) Authenticate(ctx context.Context, req *proto.Authen
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *ProxyServiceServer) authenticateRequest(ctx context.Context, req *proto.AuthenticateRequest, service *reverseproxy.Service) (bool, string, proxyauth.Method) {
|
||||
func (s *ProxyServiceServer) authenticateRequest(ctx context.Context, req *proto.AuthenticateRequest, service *rpservice.Service) (bool, string, proxyauth.Method) {
|
||||
switch v := req.GetRequest().(type) {
|
||||
case *proto.AuthenticateRequest_Pin:
|
||||
return s.authenticatePIN(ctx, req.GetId(), v, service.Auth.PinAuth)
|
||||
@@ -548,7 +529,7 @@ func (s *ProxyServiceServer) authenticateRequest(ctx context.Context, req *proto
|
||||
}
|
||||
}
|
||||
|
||||
func (s *ProxyServiceServer) authenticatePIN(ctx context.Context, serviceID string, req *proto.AuthenticateRequest_Pin, auth *reverseproxy.PINAuthConfig) (bool, string, proxyauth.Method) {
|
||||
func (s *ProxyServiceServer) authenticatePIN(ctx context.Context, serviceID string, req *proto.AuthenticateRequest_Pin, auth *rpservice.PINAuthConfig) (bool, string, proxyauth.Method) {
|
||||
if auth == nil || !auth.Enabled {
|
||||
log.WithContext(ctx).Debugf("PIN authentication attempted but not enabled for service %s", serviceID)
|
||||
return false, "", ""
|
||||
@@ -562,7 +543,7 @@ func (s *ProxyServiceServer) authenticatePIN(ctx context.Context, serviceID stri
|
||||
return true, "pin-user", proxyauth.MethodPIN
|
||||
}
|
||||
|
||||
func (s *ProxyServiceServer) authenticatePassword(ctx context.Context, serviceID string, req *proto.AuthenticateRequest_Password, auth *reverseproxy.PasswordAuthConfig) (bool, string, proxyauth.Method) {
|
||||
func (s *ProxyServiceServer) authenticatePassword(ctx context.Context, serviceID string, req *proto.AuthenticateRequest_Password, auth *rpservice.PasswordAuthConfig) (bool, string, proxyauth.Method) {
|
||||
if auth == nil || !auth.Enabled {
|
||||
log.WithContext(ctx).Debugf("password authentication attempted but not enabled for service %s", serviceID)
|
||||
return false, "", ""
|
||||
@@ -584,7 +565,7 @@ func (s *ProxyServiceServer) logAuthenticationError(ctx context.Context, err err
|
||||
}
|
||||
}
|
||||
|
||||
func (s *ProxyServiceServer) generateSessionToken(ctx context.Context, authenticated bool, service *reverseproxy.Service, userId string, method proxyauth.Method) (string, error) {
|
||||
func (s *ProxyServiceServer) generateSessionToken(ctx context.Context, authenticated bool, service *rpservice.Service, userId string, method proxyauth.Method) (string, error) {
|
||||
if !authenticated || service.SessionPrivateKey == "" {
|
||||
return "", nil
|
||||
}
|
||||
@@ -624,7 +605,7 @@ func (s *ProxyServiceServer) SendStatusUpdate(ctx context.Context, req *proto.Se
|
||||
}
|
||||
|
||||
if certificateIssued {
|
||||
if err := s.reverseProxyManager.SetCertificateIssuedAt(ctx, accountID, serviceID); err != nil {
|
||||
if err := s.serviceManager.SetCertificateIssuedAt(ctx, accountID, serviceID); err != nil {
|
||||
log.WithContext(ctx).WithError(err).Error("failed to set certificate issued timestamp")
|
||||
return nil, status.Errorf(codes.Internal, "update certificate timestamp: %v", err)
|
||||
}
|
||||
@@ -636,7 +617,7 @@ func (s *ProxyServiceServer) SendStatusUpdate(ctx context.Context, req *proto.Se
|
||||
|
||||
internalStatus := protoStatusToInternal(protoStatus)
|
||||
|
||||
if err := s.reverseProxyManager.SetStatus(ctx, accountID, serviceID, internalStatus); err != nil {
|
||||
if err := s.serviceManager.SetStatus(ctx, accountID, serviceID, internalStatus); err != nil {
|
||||
log.WithContext(ctx).WithError(err).Error("failed to update service status")
|
||||
return nil, status.Errorf(codes.Internal, "update service status: %v", err)
|
||||
}
|
||||
@@ -651,22 +632,22 @@ func (s *ProxyServiceServer) SendStatusUpdate(ctx context.Context, req *proto.Se
|
||||
}
|
||||
|
||||
// protoStatusToInternal maps proto status to internal status
|
||||
func protoStatusToInternal(protoStatus proto.ProxyStatus) reverseproxy.ProxyStatus {
|
||||
func protoStatusToInternal(protoStatus proto.ProxyStatus) rpservice.Status {
|
||||
switch protoStatus {
|
||||
case proto.ProxyStatus_PROXY_STATUS_PENDING:
|
||||
return reverseproxy.StatusPending
|
||||
return rpservice.StatusPending
|
||||
case proto.ProxyStatus_PROXY_STATUS_ACTIVE:
|
||||
return reverseproxy.StatusActive
|
||||
return rpservice.StatusActive
|
||||
case proto.ProxyStatus_PROXY_STATUS_TUNNEL_NOT_CREATED:
|
||||
return reverseproxy.StatusTunnelNotCreated
|
||||
return rpservice.StatusTunnelNotCreated
|
||||
case proto.ProxyStatus_PROXY_STATUS_CERTIFICATE_PENDING:
|
||||
return reverseproxy.StatusCertificatePending
|
||||
return rpservice.StatusCertificatePending
|
||||
case proto.ProxyStatus_PROXY_STATUS_CERTIFICATE_FAILED:
|
||||
return reverseproxy.StatusCertificateFailed
|
||||
return rpservice.StatusCertificateFailed
|
||||
case proto.ProxyStatus_PROXY_STATUS_ERROR:
|
||||
return reverseproxy.StatusError
|
||||
return rpservice.StatusError
|
||||
default:
|
||||
return reverseproxy.StatusError
|
||||
return rpservice.StatusError
|
||||
}
|
||||
}
|
||||
|
||||
@@ -731,7 +712,7 @@ func (s *ProxyServiceServer) GetOIDCURL(ctx context.Context, req *proto.GetOIDCU
|
||||
return nil, status.Errorf(codes.InvalidArgument, "parse redirect url: %v", err)
|
||||
}
|
||||
// Validate redirectURL against known service endpoints to avoid abuse of OIDC redirection.
|
||||
services, err := s.reverseProxyManager.GetAccountServices(ctx, req.GetAccountId())
|
||||
services, err := s.serviceManager.GetAccountServices(ctx, req.GetAccountId())
|
||||
if err != nil {
|
||||
log.WithContext(ctx).Errorf("failed to get account services: %v", err)
|
||||
return nil, status.Errorf(codes.FailedPrecondition, "get account services: %v", err)
|
||||
@@ -775,7 +756,10 @@ func (s *ProxyServiceServer) GetOIDCURL(ctx context.Context, req *proto.GetOIDCU
|
||||
state := fmt.Sprintf("%s|%s|%s", base64.URLEncoding.EncodeToString([]byte(redirectURL.String())), nonceB64, hmacSum)
|
||||
|
||||
codeVerifier := oauth2.GenerateVerifier()
|
||||
s.pkceVerifiers.Store(state, pkceEntry{verifier: codeVerifier, createdAt: time.Now()})
|
||||
if err := s.pkceVerifierStore.Store(state, codeVerifier, pkceVerifierTTL); err != nil {
|
||||
log.WithContext(ctx).Errorf("failed to store PKCE verifier: %v", err)
|
||||
return nil, status.Errorf(codes.Internal, "store PKCE verifier: %v", err)
|
||||
}
|
||||
|
||||
return &proto.GetOIDCURLResponse{
|
||||
Url: (&oauth2.Config{
|
||||
@@ -794,8 +778,8 @@ func (s *ProxyServiceServer) GetOIDCConfig() ProxyOIDCConfig {
|
||||
|
||||
// GetOIDCValidationConfig returns the OIDC configuration for token validation
|
||||
// in the format needed by ToProtoMapping.
|
||||
func (s *ProxyServiceServer) GetOIDCValidationConfig() reverseproxy.OIDCValidationConfig {
|
||||
return reverseproxy.OIDCValidationConfig{
|
||||
func (s *ProxyServiceServer) GetOIDCValidationConfig() proxy.OIDCValidationConfig {
|
||||
return proxy.OIDCValidationConfig{
|
||||
Issuer: s.oidcConfig.Issuer,
|
||||
Audiences: []string{s.oidcConfig.Audience},
|
||||
KeysLocation: s.oidcConfig.KeysLocation,
|
||||
@@ -812,18 +796,10 @@ func (s *ProxyServiceServer) generateHMAC(input string) string {
|
||||
// ValidateState validates the state parameter from an OAuth callback.
|
||||
// Returns the original redirect URL if valid, or an error if invalid.
|
||||
func (s *ProxyServiceServer) ValidateState(state string) (verifier, redirectURL string, err error) {
|
||||
v, ok := s.pkceVerifiers.LoadAndDelete(state)
|
||||
verifier, ok := s.pkceVerifierStore.LoadAndDelete(state)
|
||||
if !ok {
|
||||
return "", "", errors.New("no verifier for state")
|
||||
}
|
||||
entry, ok := v.(pkceEntry)
|
||||
if !ok {
|
||||
return "", "", errors.New("invalid verifier for state")
|
||||
}
|
||||
if time.Since(entry.createdAt) > pkceVerifierTTL {
|
||||
return "", "", errors.New("PKCE verifier expired")
|
||||
}
|
||||
verifier = entry.verifier
|
||||
|
||||
// State format: base64(redirectURL)|nonce|hmac(redirectURL|nonce)
|
||||
parts := strings.Split(state, "|")
|
||||
@@ -854,12 +830,12 @@ func (s *ProxyServiceServer) ValidateState(state string) (verifier, redirectURL
|
||||
// GenerateSessionToken creates a signed session JWT for the given domain and user.
|
||||
func (s *ProxyServiceServer) GenerateSessionToken(ctx context.Context, domain, userID string, method proxyauth.Method) (string, error) {
|
||||
// Find the service by domain to get its signing key
|
||||
services, err := s.reverseProxyManager.GetGlobalServices(ctx)
|
||||
services, err := s.serviceManager.GetGlobalServices(ctx)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("get services: %w", err)
|
||||
}
|
||||
|
||||
var service *reverseproxy.Service
|
||||
var service *rpservice.Service
|
||||
for _, svc := range services {
|
||||
if svc.Domain == domain {
|
||||
service = svc
|
||||
@@ -925,8 +901,8 @@ func (s *ProxyServiceServer) ValidateUserGroupAccess(ctx context.Context, domain
|
||||
return fmt.Errorf("user %s not in allowed groups for domain %s", user.Id, domain)
|
||||
}
|
||||
|
||||
func (s *ProxyServiceServer) getAccountServiceByDomain(ctx context.Context, accountID, domain string) (*reverseproxy.Service, error) {
|
||||
services, err := s.reverseProxyManager.GetAccountServices(ctx, accountID)
|
||||
func (s *ProxyServiceServer) getAccountServiceByDomain(ctx context.Context, accountID, domain string) (*rpservice.Service, error) {
|
||||
services, err := s.serviceManager.GetAccountServices(ctx, accountID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("get account services: %w", err)
|
||||
}
|
||||
@@ -1047,8 +1023,8 @@ func (s *ProxyServiceServer) ValidateSession(ctx context.Context, req *proto.Val
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *ProxyServiceServer) getServiceByDomain(ctx context.Context, domain string) (*reverseproxy.Service, error) {
|
||||
services, err := s.reverseProxyManager.GetGlobalServices(ctx)
|
||||
func (s *ProxyServiceServer) getServiceByDomain(ctx context.Context, domain string) (*rpservice.Service, error) {
|
||||
services, err := s.serviceManager.GetGlobalServices(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("get services: %w", err)
|
||||
}
|
||||
@@ -1062,7 +1038,7 @@ func (s *ProxyServiceServer) getServiceByDomain(ctx context.Context, domain stri
|
||||
return nil, fmt.Errorf("service not found for domain: %s", domain)
|
||||
}
|
||||
|
||||
func (s *ProxyServiceServer) checkGroupAccess(service *reverseproxy.Service, user *types.User) error {
|
||||
func (s *ProxyServiceServer) checkGroupAccess(service *rpservice.Service, user *types.User) error {
|
||||
if service.Auth.BearerAuth == nil || !service.Auth.BearerAuth.Enabled {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -107,7 +107,7 @@ func NewProxyAuthInterceptors(tokenStore proxyTokenStore) (grpc.UnaryServerInter
|
||||
}
|
||||
|
||||
func (i *proxyAuthInterceptor) validateProxyToken(ctx context.Context) (*types.ProxyAccessToken, error) {
|
||||
clientIP := peerIPFromContext(ctx)
|
||||
clientIP := PeerIPFromContext(ctx)
|
||||
|
||||
if clientIP != "" && i.failureLimiter.isLimited(clientIP) {
|
||||
return nil, status.Errorf(codes.ResourceExhausted, "too many failed authentication attempts")
|
||||
|
||||
@@ -115,9 +115,9 @@ func (l *authFailureLimiter) stop() {
|
||||
l.cancel()
|
||||
}
|
||||
|
||||
// peerIPFromContext extracts the client IP from the gRPC context.
|
||||
// PeerIPFromContext extracts the client IP from the gRPC context.
|
||||
// Uses realip (from trusted proxy headers) first, falls back to the transport peer address.
|
||||
func peerIPFromContext(ctx context.Context) clientIP {
|
||||
func PeerIPFromContext(ctx context.Context) string {
|
||||
if addr, ok := realip.FromContext(ctx); ok {
|
||||
return addr.String()
|
||||
}
|
||||
|
||||
@@ -8,12 +8,12 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service"
|
||||
"github.com/netbirdio/netbird/management/server/types"
|
||||
)
|
||||
|
||||
type mockReverseProxyManager struct {
|
||||
proxiesByAccount map[string][]*reverseproxy.Service
|
||||
proxiesByAccount map[string][]*service.Service
|
||||
err error
|
||||
}
|
||||
|
||||
@@ -21,31 +21,31 @@ func (m *mockReverseProxyManager) DeleteAllServices(ctx context.Context, account
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockReverseProxyManager) GetAccountServices(ctx context.Context, accountID string) ([]*reverseproxy.Service, error) {
|
||||
func (m *mockReverseProxyManager) GetAccountServices(ctx context.Context, accountID string) ([]*service.Service, error) {
|
||||
if m.err != nil {
|
||||
return nil, m.err
|
||||
}
|
||||
return m.proxiesByAccount[accountID], nil
|
||||
}
|
||||
|
||||
func (m *mockReverseProxyManager) GetGlobalServices(ctx context.Context) ([]*reverseproxy.Service, error) {
|
||||
func (m *mockReverseProxyManager) GetGlobalServices(ctx context.Context) ([]*service.Service, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (m *mockReverseProxyManager) GetAllServices(ctx context.Context, accountID, userID string) ([]*reverseproxy.Service, error) {
|
||||
return []*reverseproxy.Service{}, nil
|
||||
func (m *mockReverseProxyManager) GetAllServices(ctx context.Context, accountID, userID string) ([]*service.Service, error) {
|
||||
return []*service.Service{}, nil
|
||||
}
|
||||
|
||||
func (m *mockReverseProxyManager) GetService(ctx context.Context, accountID, userID, reverseProxyID string) (*reverseproxy.Service, error) {
|
||||
return &reverseproxy.Service{}, nil
|
||||
func (m *mockReverseProxyManager) GetService(ctx context.Context, accountID, userID, reverseProxyID string) (*service.Service, error) {
|
||||
return &service.Service{}, nil
|
||||
}
|
||||
|
||||
func (m *mockReverseProxyManager) CreateService(ctx context.Context, accountID, userID string, rp *reverseproxy.Service) (*reverseproxy.Service, error) {
|
||||
return &reverseproxy.Service{}, nil
|
||||
func (m *mockReverseProxyManager) CreateService(ctx context.Context, accountID, userID string, rp *service.Service) (*service.Service, error) {
|
||||
return &service.Service{}, nil
|
||||
}
|
||||
|
||||
func (m *mockReverseProxyManager) UpdateService(ctx context.Context, accountID, userID string, rp *reverseproxy.Service) (*reverseproxy.Service, error) {
|
||||
return &reverseproxy.Service{}, nil
|
||||
func (m *mockReverseProxyManager) UpdateService(ctx context.Context, accountID, userID string, rp *service.Service) (*service.Service, error) {
|
||||
return &service.Service{}, nil
|
||||
}
|
||||
|
||||
func (m *mockReverseProxyManager) DeleteService(ctx context.Context, accountID, userID, reverseProxyID string) error {
|
||||
@@ -56,7 +56,7 @@ func (m *mockReverseProxyManager) SetCertificateIssuedAt(ctx context.Context, ac
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockReverseProxyManager) SetStatus(ctx context.Context, accountID, reverseProxyID string, status reverseproxy.ProxyStatus) error {
|
||||
func (m *mockReverseProxyManager) SetStatus(ctx context.Context, accountID, reverseProxyID string, status service.Status) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -68,16 +68,16 @@ func (m *mockReverseProxyManager) ReloadService(ctx context.Context, accountID,
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockReverseProxyManager) GetServiceByID(ctx context.Context, accountID, reverseProxyID string) (*reverseproxy.Service, error) {
|
||||
return &reverseproxy.Service{}, nil
|
||||
func (m *mockReverseProxyManager) GetServiceByID(ctx context.Context, accountID, reverseProxyID string) (*service.Service, error) {
|
||||
return &service.Service{}, nil
|
||||
}
|
||||
|
||||
func (m *mockReverseProxyManager) GetServiceIDByTargetID(_ context.Context, _, _ string) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (m *mockReverseProxyManager) CreateServiceFromPeer(_ context.Context, _, _ string, _ *reverseproxy.ExposeServiceRequest) (*reverseproxy.ExposeServiceResponse, error) {
|
||||
return &reverseproxy.ExposeServiceResponse{}, nil
|
||||
func (m *mockReverseProxyManager) CreateServiceFromPeer(_ context.Context, _, _ string, _ *service.ExposeServiceRequest) (*service.ExposeServiceResponse, error) {
|
||||
return &service.ExposeServiceResponse{}, nil
|
||||
}
|
||||
|
||||
func (m *mockReverseProxyManager) RenewServiceFromPeer(_ context.Context, _, _, _ string) error {
|
||||
@@ -111,7 +111,7 @@ func TestValidateUserGroupAccess(t *testing.T) {
|
||||
name string
|
||||
domain string
|
||||
userID string
|
||||
proxiesByAccount map[string][]*reverseproxy.Service
|
||||
proxiesByAccount map[string][]*service.Service
|
||||
users map[string]*types.User
|
||||
proxyErr error
|
||||
userErr error
|
||||
@@ -122,7 +122,7 @@ func TestValidateUserGroupAccess(t *testing.T) {
|
||||
name: "user not found",
|
||||
domain: "app.example.com",
|
||||
userID: "unknown-user",
|
||||
proxiesByAccount: map[string][]*reverseproxy.Service{
|
||||
proxiesByAccount: map[string][]*service.Service{
|
||||
"account1": {{Domain: "app.example.com", AccountID: "account1"}},
|
||||
},
|
||||
users: map[string]*types.User{},
|
||||
@@ -133,7 +133,7 @@ func TestValidateUserGroupAccess(t *testing.T) {
|
||||
name: "proxy not found in user's account",
|
||||
domain: "app.example.com",
|
||||
userID: "user1",
|
||||
proxiesByAccount: map[string][]*reverseproxy.Service{},
|
||||
proxiesByAccount: map[string][]*service.Service{},
|
||||
users: map[string]*types.User{
|
||||
"user1": {Id: "user1", AccountID: "account1"},
|
||||
},
|
||||
@@ -144,7 +144,7 @@ func TestValidateUserGroupAccess(t *testing.T) {
|
||||
name: "proxy exists in different account - not accessible",
|
||||
domain: "app.example.com",
|
||||
userID: "user1",
|
||||
proxiesByAccount: map[string][]*reverseproxy.Service{
|
||||
proxiesByAccount: map[string][]*service.Service{
|
||||
"account2": {{Domain: "app.example.com", AccountID: "account2"}},
|
||||
},
|
||||
users: map[string]*types.User{
|
||||
@@ -157,8 +157,8 @@ func TestValidateUserGroupAccess(t *testing.T) {
|
||||
name: "no bearer auth configured - same account allows access",
|
||||
domain: "app.example.com",
|
||||
userID: "user1",
|
||||
proxiesByAccount: map[string][]*reverseproxy.Service{
|
||||
"account1": {{Domain: "app.example.com", AccountID: "account1", Auth: reverseproxy.AuthConfig{}}},
|
||||
proxiesByAccount: map[string][]*service.Service{
|
||||
"account1": {{Domain: "app.example.com", AccountID: "account1", Auth: service.AuthConfig{}}},
|
||||
},
|
||||
users: map[string]*types.User{
|
||||
"user1": {Id: "user1", AccountID: "account1"},
|
||||
@@ -169,12 +169,12 @@ func TestValidateUserGroupAccess(t *testing.T) {
|
||||
name: "bearer auth disabled - same account allows access",
|
||||
domain: "app.example.com",
|
||||
userID: "user1",
|
||||
proxiesByAccount: map[string][]*reverseproxy.Service{
|
||||
proxiesByAccount: map[string][]*service.Service{
|
||||
"account1": {{
|
||||
Domain: "app.example.com",
|
||||
AccountID: "account1",
|
||||
Auth: reverseproxy.AuthConfig{
|
||||
BearerAuth: &reverseproxy.BearerAuthConfig{Enabled: false},
|
||||
Auth: service.AuthConfig{
|
||||
BearerAuth: &service.BearerAuthConfig{Enabled: false},
|
||||
},
|
||||
}},
|
||||
},
|
||||
@@ -187,12 +187,12 @@ func TestValidateUserGroupAccess(t *testing.T) {
|
||||
name: "bearer auth enabled but no groups configured - same account allows access",
|
||||
domain: "app.example.com",
|
||||
userID: "user1",
|
||||
proxiesByAccount: map[string][]*reverseproxy.Service{
|
||||
proxiesByAccount: map[string][]*service.Service{
|
||||
"account1": {{
|
||||
Domain: "app.example.com",
|
||||
AccountID: "account1",
|
||||
Auth: reverseproxy.AuthConfig{
|
||||
BearerAuth: &reverseproxy.BearerAuthConfig{
|
||||
Auth: service.AuthConfig{
|
||||
BearerAuth: &service.BearerAuthConfig{
|
||||
Enabled: true,
|
||||
DistributionGroups: []string{},
|
||||
},
|
||||
@@ -208,12 +208,12 @@ func TestValidateUserGroupAccess(t *testing.T) {
|
||||
name: "user not in allowed groups",
|
||||
domain: "app.example.com",
|
||||
userID: "user1",
|
||||
proxiesByAccount: map[string][]*reverseproxy.Service{
|
||||
proxiesByAccount: map[string][]*service.Service{
|
||||
"account1": {{
|
||||
Domain: "app.example.com",
|
||||
AccountID: "account1",
|
||||
Auth: reverseproxy.AuthConfig{
|
||||
BearerAuth: &reverseproxy.BearerAuthConfig{
|
||||
Auth: service.AuthConfig{
|
||||
BearerAuth: &service.BearerAuthConfig{
|
||||
Enabled: true,
|
||||
DistributionGroups: []string{"group1", "group2"},
|
||||
},
|
||||
@@ -230,12 +230,12 @@ func TestValidateUserGroupAccess(t *testing.T) {
|
||||
name: "user in one of the allowed groups - allow access",
|
||||
domain: "app.example.com",
|
||||
userID: "user1",
|
||||
proxiesByAccount: map[string][]*reverseproxy.Service{
|
||||
proxiesByAccount: map[string][]*service.Service{
|
||||
"account1": {{
|
||||
Domain: "app.example.com",
|
||||
AccountID: "account1",
|
||||
Auth: reverseproxy.AuthConfig{
|
||||
BearerAuth: &reverseproxy.BearerAuthConfig{
|
||||
Auth: service.AuthConfig{
|
||||
BearerAuth: &service.BearerAuthConfig{
|
||||
Enabled: true,
|
||||
DistributionGroups: []string{"group1", "group2"},
|
||||
},
|
||||
@@ -251,12 +251,12 @@ func TestValidateUserGroupAccess(t *testing.T) {
|
||||
name: "user in all allowed groups - allow access",
|
||||
domain: "app.example.com",
|
||||
userID: "user1",
|
||||
proxiesByAccount: map[string][]*reverseproxy.Service{
|
||||
proxiesByAccount: map[string][]*service.Service{
|
||||
"account1": {{
|
||||
Domain: "app.example.com",
|
||||
AccountID: "account1",
|
||||
Auth: reverseproxy.AuthConfig{
|
||||
BearerAuth: &reverseproxy.BearerAuthConfig{
|
||||
Auth: service.AuthConfig{
|
||||
BearerAuth: &service.BearerAuthConfig{
|
||||
Enabled: true,
|
||||
DistributionGroups: []string{"group1", "group2"},
|
||||
},
|
||||
@@ -284,10 +284,10 @@ func TestValidateUserGroupAccess(t *testing.T) {
|
||||
name: "multiple proxies in account - finds correct one",
|
||||
domain: "app2.example.com",
|
||||
userID: "user1",
|
||||
proxiesByAccount: map[string][]*reverseproxy.Service{
|
||||
proxiesByAccount: map[string][]*service.Service{
|
||||
"account1": {
|
||||
{Domain: "app1.example.com", AccountID: "account1"},
|
||||
{Domain: "app2.example.com", AccountID: "account1", Auth: reverseproxy.AuthConfig{}},
|
||||
{Domain: "app2.example.com", AccountID: "account1", Auth: service.AuthConfig{}},
|
||||
{Domain: "app3.example.com", AccountID: "account1"},
|
||||
},
|
||||
},
|
||||
@@ -301,7 +301,7 @@ func TestValidateUserGroupAccess(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
server := &ProxyServiceServer{
|
||||
reverseProxyManager: &mockReverseProxyManager{
|
||||
serviceManager: &mockReverseProxyManager{
|
||||
proxiesByAccount: tt.proxiesByAccount,
|
||||
err: tt.proxyErr,
|
||||
},
|
||||
@@ -328,7 +328,7 @@ func TestGetAccountProxyByDomain(t *testing.T) {
|
||||
name string
|
||||
accountID string
|
||||
domain string
|
||||
proxiesByAccount map[string][]*reverseproxy.Service
|
||||
proxiesByAccount map[string][]*service.Service
|
||||
err error
|
||||
expectProxy bool
|
||||
expectErr bool
|
||||
@@ -337,7 +337,7 @@ func TestGetAccountProxyByDomain(t *testing.T) {
|
||||
name: "proxy found",
|
||||
accountID: "account1",
|
||||
domain: "app.example.com",
|
||||
proxiesByAccount: map[string][]*reverseproxy.Service{
|
||||
proxiesByAccount: map[string][]*service.Service{
|
||||
"account1": {
|
||||
{Domain: "other.example.com", AccountID: "account1"},
|
||||
{Domain: "app.example.com", AccountID: "account1"},
|
||||
@@ -350,7 +350,7 @@ func TestGetAccountProxyByDomain(t *testing.T) {
|
||||
name: "proxy not found in account",
|
||||
accountID: "account1",
|
||||
domain: "unknown.example.com",
|
||||
proxiesByAccount: map[string][]*reverseproxy.Service{
|
||||
proxiesByAccount: map[string][]*service.Service{
|
||||
"account1": {{Domain: "app.example.com", AccountID: "account1"}},
|
||||
},
|
||||
expectProxy: false,
|
||||
@@ -360,7 +360,7 @@ func TestGetAccountProxyByDomain(t *testing.T) {
|
||||
name: "empty proxy list for account",
|
||||
accountID: "account1",
|
||||
domain: "app.example.com",
|
||||
proxiesByAccount: map[string][]*reverseproxy.Service{},
|
||||
proxiesByAccount: map[string][]*service.Service{},
|
||||
expectProxy: false,
|
||||
expectErr: true,
|
||||
},
|
||||
@@ -378,7 +378,7 @@ func TestGetAccountProxyByDomain(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
server := &ProxyServiceServer{
|
||||
reverseProxyManager: &mockReverseProxyManager{
|
||||
serviceManager: &mockReverseProxyManager{
|
||||
proxiesByAccount: tt.proxiesByAccount,
|
||||
err: tt.err,
|
||||
},
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"encoding/base64"
|
||||
"strings"
|
||||
@@ -11,9 +12,61 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy"
|
||||
"github.com/netbirdio/netbird/shared/management/proto"
|
||||
)
|
||||
|
||||
type testProxyController struct {
|
||||
mu sync.Mutex
|
||||
clusterProxies map[string]map[string]struct{}
|
||||
}
|
||||
|
||||
func newTestProxyController() *testProxyController {
|
||||
return &testProxyController{
|
||||
clusterProxies: make(map[string]map[string]struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *testProxyController) SendServiceUpdateToCluster(_ context.Context, _ string, _ *proto.ProxyMapping, _ string) {
|
||||
}
|
||||
|
||||
func (c *testProxyController) GetOIDCValidationConfig() proxy.OIDCValidationConfig {
|
||||
return proxy.OIDCValidationConfig{}
|
||||
}
|
||||
|
||||
func (c *testProxyController) RegisterProxyToCluster(_ context.Context, clusterAddr, proxyID string) error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
if _, ok := c.clusterProxies[clusterAddr]; !ok {
|
||||
c.clusterProxies[clusterAddr] = make(map[string]struct{})
|
||||
}
|
||||
c.clusterProxies[clusterAddr][proxyID] = struct{}{}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *testProxyController) UnregisterProxyFromCluster(_ context.Context, clusterAddr, proxyID string) error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
if proxies, ok := c.clusterProxies[clusterAddr]; ok {
|
||||
delete(proxies, proxyID)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *testProxyController) GetProxiesForCluster(clusterAddr string) []string {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
proxies, ok := c.clusterProxies[clusterAddr]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
result := make([]string, 0, len(proxies))
|
||||
for id := range proxies {
|
||||
result = append(result, id)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// registerFakeProxy adds a fake proxy connection to the server's internal maps
|
||||
// and returns the channel where messages will be received.
|
||||
func registerFakeProxy(s *ProxyServiceServer, proxyID, clusterAddr string) chan *proto.GetMappingUpdateResponse {
|
||||
@@ -25,8 +78,7 @@ func registerFakeProxy(s *ProxyServiceServer, proxyID, clusterAddr string) chan
|
||||
}
|
||||
s.connectedProxies.Store(proxyID, conn)
|
||||
|
||||
proxySet, _ := s.clusterProxies.LoadOrStore(clusterAddr, &sync.Map{})
|
||||
proxySet.(*sync.Map).Store(proxyID, struct{}{})
|
||||
_ = s.proxyController.RegisterProxyToCluster(context.Background(), clusterAddr, proxyID)
|
||||
|
||||
return ch
|
||||
}
|
||||
@@ -41,12 +93,18 @@ func drainChannel(ch chan *proto.GetMappingUpdateResponse) *proto.GetMappingUpda
|
||||
}
|
||||
|
||||
func TestSendServiceUpdateToCluster_UniqueTokensPerProxy(t *testing.T) {
|
||||
tokenStore := NewOneTimeTokenStore(time.Hour)
|
||||
defer tokenStore.Close()
|
||||
ctx := context.Background()
|
||||
tokenStore, err := NewOneTimeTokenStore(ctx, time.Hour, 10*time.Minute, 100)
|
||||
require.NoError(t, err)
|
||||
|
||||
pkceStore, err := NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := &ProxyServiceServer{
|
||||
tokenStore: tokenStore,
|
||||
tokenStore: tokenStore,
|
||||
pkceVerifierStore: pkceStore,
|
||||
}
|
||||
s.SetProxyController(newTestProxyController())
|
||||
|
||||
const cluster = "proxy.example.com"
|
||||
const numProxies = 3
|
||||
@@ -67,11 +125,7 @@ func TestSendServiceUpdateToCluster_UniqueTokensPerProxy(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
update := &proto.GetMappingUpdateResponse{
|
||||
Mapping: []*proto.ProxyMapping{mapping},
|
||||
}
|
||||
|
||||
s.SendServiceUpdateToCluster(update, cluster)
|
||||
s.SendServiceUpdateToCluster(context.Background(), mapping, cluster)
|
||||
|
||||
tokens := make([]string, numProxies)
|
||||
for i, ch := range channels {
|
||||
@@ -101,12 +155,18 @@ func TestSendServiceUpdateToCluster_UniqueTokensPerProxy(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSendServiceUpdateToCluster_DeleteNoToken(t *testing.T) {
|
||||
tokenStore := NewOneTimeTokenStore(time.Hour)
|
||||
defer tokenStore.Close()
|
||||
ctx := context.Background()
|
||||
tokenStore, err := NewOneTimeTokenStore(ctx, time.Hour, 10*time.Minute, 100)
|
||||
require.NoError(t, err)
|
||||
|
||||
pkceStore, err := NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := &ProxyServiceServer{
|
||||
tokenStore: tokenStore,
|
||||
tokenStore: tokenStore,
|
||||
pkceVerifierStore: pkceStore,
|
||||
}
|
||||
s.SetProxyController(newTestProxyController())
|
||||
|
||||
const cluster = "proxy.example.com"
|
||||
ch1 := registerFakeProxy(s, "proxy-a", cluster)
|
||||
@@ -119,11 +179,7 @@ func TestSendServiceUpdateToCluster_DeleteNoToken(t *testing.T) {
|
||||
Domain: "test.example.com",
|
||||
}
|
||||
|
||||
update := &proto.GetMappingUpdateResponse{
|
||||
Mapping: []*proto.ProxyMapping{mapping},
|
||||
}
|
||||
|
||||
s.SendServiceUpdateToCluster(update, cluster)
|
||||
s.SendServiceUpdateToCluster(context.Background(), mapping, cluster)
|
||||
|
||||
resp1 := drainChannel(ch1)
|
||||
resp2 := drainChannel(ch2)
|
||||
@@ -135,18 +191,21 @@ func TestSendServiceUpdateToCluster_DeleteNoToken(t *testing.T) {
|
||||
// Delete operations should not generate tokens
|
||||
assert.Empty(t, resp1.Mapping[0].AuthToken)
|
||||
assert.Empty(t, resp2.Mapping[0].AuthToken)
|
||||
|
||||
// No tokens should have been created
|
||||
assert.Equal(t, 0, tokenStore.GetTokenCount())
|
||||
}
|
||||
|
||||
func TestSendServiceUpdate_UniqueTokensPerProxy(t *testing.T) {
|
||||
tokenStore := NewOneTimeTokenStore(time.Hour)
|
||||
defer tokenStore.Close()
|
||||
ctx := context.Background()
|
||||
tokenStore, err := NewOneTimeTokenStore(ctx, time.Hour, 10*time.Minute, 100)
|
||||
require.NoError(t, err)
|
||||
|
||||
pkceStore, err := NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := &ProxyServiceServer{
|
||||
tokenStore: tokenStore,
|
||||
tokenStore: tokenStore,
|
||||
pkceVerifierStore: pkceStore,
|
||||
}
|
||||
s.SetProxyController(newTestProxyController())
|
||||
|
||||
// Register proxies in different clusters (SendServiceUpdate broadcasts to all)
|
||||
ch1 := registerFakeProxy(s, "proxy-a", "cluster-a")
|
||||
@@ -196,10 +255,15 @@ func generateState(s *ProxyServiceServer, redirectURL string) string {
|
||||
}
|
||||
|
||||
func TestOAuthState_NeverTheSame(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
pkceStore, err := NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := &ProxyServiceServer{
|
||||
oidcConfig: ProxyOIDCConfig{
|
||||
HMACKey: []byte("test-hmac-key"),
|
||||
},
|
||||
pkceVerifierStore: pkceStore,
|
||||
}
|
||||
|
||||
redirectURL := "https://app.example.com/callback"
|
||||
@@ -220,31 +284,43 @@ func TestOAuthState_NeverTheSame(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestValidateState_RejectsOldTwoPartFormat(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
pkceStore, err := NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := &ProxyServiceServer{
|
||||
oidcConfig: ProxyOIDCConfig{
|
||||
HMACKey: []byte("test-hmac-key"),
|
||||
},
|
||||
pkceVerifierStore: pkceStore,
|
||||
}
|
||||
|
||||
// Old format had only 2 parts: base64(url)|hmac
|
||||
s.pkceVerifiers.Store("base64url|hmac", pkceEntry{verifier: "test", createdAt: time.Now()})
|
||||
err = s.pkceVerifierStore.Store("base64url|hmac", "test", 10*time.Minute)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, _, err := s.ValidateState("base64url|hmac")
|
||||
_, _, err = s.ValidateState("base64url|hmac")
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "invalid state format")
|
||||
}
|
||||
|
||||
func TestValidateState_RejectsInvalidHMAC(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
pkceStore, err := NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := &ProxyServiceServer{
|
||||
oidcConfig: ProxyOIDCConfig{
|
||||
HMACKey: []byte("test-hmac-key"),
|
||||
},
|
||||
pkceVerifierStore: pkceStore,
|
||||
}
|
||||
|
||||
// Store with tampered HMAC
|
||||
s.pkceVerifiers.Store("dGVzdA==|nonce|wrong-hmac", pkceEntry{verifier: "test", createdAt: time.Now()})
|
||||
err = s.pkceVerifierStore.Store("dGVzdA==|nonce|wrong-hmac", "test", 10*time.Minute)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, _, err := s.ValidateState("dGVzdA==|nonce|wrong-hmac")
|
||||
_, _, err = s.ValidateState("dGVzdA==|nonce|wrong-hmac")
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "invalid state signature")
|
||||
}
|
||||
|
||||
@@ -26,7 +26,7 @@ import (
|
||||
"github.com/netbirdio/netbird/shared/management/client/common"
|
||||
|
||||
"github.com/netbirdio/netbird/management/internals/controllers/network_map"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy"
|
||||
rpservice "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service"
|
||||
nbconfig "github.com/netbirdio/netbird/management/internals/server/config"
|
||||
"github.com/netbirdio/netbird/management/server/idp"
|
||||
"github.com/netbirdio/netbird/management/server/job"
|
||||
@@ -82,7 +82,7 @@ type Server struct {
|
||||
syncLimEnabled bool
|
||||
syncLim int32
|
||||
|
||||
reverseProxyManager reverseproxy.Manager
|
||||
reverseProxyManager rpservice.Manager
|
||||
reverseProxyMu sync.RWMutex
|
||||
}
|
||||
|
||||
@@ -330,13 +330,12 @@ func (s *Server) Sync(req *proto.EncryptedMessage, srv proto.ManagementService_S
|
||||
|
||||
s.secretsManager.SetupRefresh(ctx, accountID, peer.ID)
|
||||
|
||||
if s.appMetrics != nil {
|
||||
s.appMetrics.GRPCMetrics().CountSyncRequestDuration(time.Since(reqStart), accountID)
|
||||
}
|
||||
|
||||
unlock()
|
||||
unlock = nil
|
||||
|
||||
if s.appMetrics != nil {
|
||||
s.appMetrics.GRPCMetrics().CountSyncRequestDuration(time.Since(reqStart), accountID)
|
||||
}
|
||||
log.WithContext(ctx).Debugf("Sync took %s", time.Since(reqStart))
|
||||
|
||||
s.syncSem.Add(-1)
|
||||
@@ -743,13 +742,6 @@ func (s *Server) Login(ctx context.Context, req *proto.EncryptedMessage) (*proto
|
||||
|
||||
log.WithContext(ctx).Debugf("Login request from peer [%s] [%s]", req.WgPubKey, sRealIP)
|
||||
|
||||
defer func() {
|
||||
if s.appMetrics != nil {
|
||||
s.appMetrics.GRPCMetrics().CountLoginRequestDuration(time.Since(reqStart), accountID)
|
||||
}
|
||||
log.WithContext(ctx).Debugf("Login took %s", time.Since(reqStart))
|
||||
}()
|
||||
|
||||
if loginReq.GetMeta() == nil {
|
||||
msg := status.Errorf(codes.FailedPrecondition,
|
||||
"peer system meta has to be provided to log in. Peer %s, remote addr %s", peerKey.String(), realIP)
|
||||
@@ -799,6 +791,11 @@ func (s *Server) Login(ctx context.Context, req *proto.EncryptedMessage) (*proto
|
||||
return nil, status.Errorf(codes.Internal, "failed logging in peer")
|
||||
}
|
||||
|
||||
if s.appMetrics != nil {
|
||||
s.appMetrics.GRPCMetrics().CountLoginRequestDuration(time.Since(reqStart), accountID)
|
||||
}
|
||||
log.WithContext(ctx).Debugf("Login took %s", time.Since(reqStart))
|
||||
|
||||
return &proto.EncryptedMessage{
|
||||
WgPubKey: key.PublicKey().String(),
|
||||
Body: encryptedResp,
|
||||
|
||||
@@ -13,7 +13,7 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/sessionkey"
|
||||
"github.com/netbirdio/netbird/management/server/store"
|
||||
"github.com/netbirdio/netbird/management/server/types"
|
||||
@@ -34,11 +34,18 @@ func setupValidateSessionTest(t *testing.T) *validateSessionTestSetup {
|
||||
testStore, storeCleanup, err := store.NewTestStoreFromSQL(ctx, "../../../server/testdata/auth_callback.sql", t.TempDir())
|
||||
require.NoError(t, err)
|
||||
|
||||
proxyManager := &testValidateSessionProxyManager{store: testStore}
|
||||
serviceManager := &testValidateSessionServiceManager{store: testStore}
|
||||
usersManager := &testValidateSessionUsersManager{store: testStore}
|
||||
proxyManager := &testValidateSessionProxyManager{}
|
||||
|
||||
proxyService := NewProxyServiceServer(nil, NewOneTimeTokenStore(time.Minute), ProxyOIDCConfig{}, nil, usersManager)
|
||||
proxyService.SetProxyManager(proxyManager)
|
||||
tokenStore, err := NewOneTimeTokenStore(ctx, time.Minute, 10*time.Minute, 100)
|
||||
require.NoError(t, err)
|
||||
|
||||
pkceStore, err := NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100)
|
||||
require.NoError(t, err)
|
||||
|
||||
proxyService := NewProxyServiceServer(nil, tokenStore, pkceStore, ProxyOIDCConfig{}, nil, usersManager, proxyManager)
|
||||
proxyService.SetServiceManager(serviceManager)
|
||||
|
||||
createTestProxies(t, ctx, testStore)
|
||||
|
||||
@@ -54,7 +61,7 @@ func createTestProxies(t *testing.T, ctx context.Context, testStore store.Store)
|
||||
|
||||
pubKey, privKey := generateSessionKeyPair(t)
|
||||
|
||||
testProxy := &reverseproxy.Service{
|
||||
testProxy := &service.Service{
|
||||
ID: "testProxyId",
|
||||
AccountID: "testAccountId",
|
||||
Name: "Test Proxy",
|
||||
@@ -62,15 +69,15 @@ func createTestProxies(t *testing.T, ctx context.Context, testStore store.Store)
|
||||
Enabled: true,
|
||||
SessionPrivateKey: privKey,
|
||||
SessionPublicKey: pubKey,
|
||||
Auth: reverseproxy.AuthConfig{
|
||||
BearerAuth: &reverseproxy.BearerAuthConfig{
|
||||
Auth: service.AuthConfig{
|
||||
BearerAuth: &service.BearerAuthConfig{
|
||||
Enabled: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
require.NoError(t, testStore.CreateService(ctx, testProxy))
|
||||
|
||||
restrictedProxy := &reverseproxy.Service{
|
||||
restrictedProxy := &service.Service{
|
||||
ID: "restrictedProxyId",
|
||||
AccountID: "testAccountId",
|
||||
Name: "Restricted Proxy",
|
||||
@@ -78,8 +85,8 @@ func createTestProxies(t *testing.T, ctx context.Context, testStore store.Store)
|
||||
Enabled: true,
|
||||
SessionPrivateKey: privKey,
|
||||
SessionPublicKey: pubKey,
|
||||
Auth: reverseproxy.AuthConfig{
|
||||
BearerAuth: &reverseproxy.BearerAuthConfig{
|
||||
Auth: service.AuthConfig{
|
||||
BearerAuth: &service.BearerAuthConfig{
|
||||
Enabled: true,
|
||||
DistributionGroups: []string{"allowedGroupId"},
|
||||
},
|
||||
@@ -239,79 +246,101 @@ func TestValidateSession_MissingToken(t *testing.T) {
|
||||
assert.Contains(t, resp.DeniedReason, "missing")
|
||||
}
|
||||
|
||||
type testValidateSessionProxyManager struct {
|
||||
type testValidateSessionServiceManager struct {
|
||||
store store.Store
|
||||
}
|
||||
|
||||
func (m *testValidateSessionProxyManager) GetAllServices(_ context.Context, _, _ string) ([]*reverseproxy.Service, error) {
|
||||
func (m *testValidateSessionServiceManager) GetAllServices(_ context.Context, _, _ string) ([]*service.Service, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (m *testValidateSessionProxyManager) GetService(_ context.Context, _, _, _ string) (*reverseproxy.Service, error) {
|
||||
func (m *testValidateSessionServiceManager) GetService(_ context.Context, _, _, _ string) (*service.Service, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (m *testValidateSessionProxyManager) CreateService(_ context.Context, _, _ string, _ *reverseproxy.Service) (*reverseproxy.Service, error) {
|
||||
func (m *testValidateSessionServiceManager) CreateService(_ context.Context, _, _ string, _ *service.Service) (*service.Service, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (m *testValidateSessionProxyManager) UpdateService(_ context.Context, _, _ string, _ *reverseproxy.Service) (*reverseproxy.Service, error) {
|
||||
func (m *testValidateSessionServiceManager) UpdateService(_ context.Context, _, _ string, _ *service.Service) (*service.Service, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (m *testValidateSessionProxyManager) DeleteService(_ context.Context, _, _, _ string) error {
|
||||
func (m *testValidateSessionServiceManager) DeleteService(_ context.Context, _, _, _ string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *testValidateSessionProxyManager) DeleteAllServices(_ context.Context, _, _ string) error {
|
||||
func (m *testValidateSessionServiceManager) DeleteAllServices(_ context.Context, _, _ string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *testValidateSessionProxyManager) SetCertificateIssuedAt(_ context.Context, _, _ string) error {
|
||||
func (m *testValidateSessionServiceManager) SetCertificateIssuedAt(_ context.Context, _, _ string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *testValidateSessionProxyManager) SetStatus(_ context.Context, _, _ string, _ reverseproxy.ProxyStatus) error {
|
||||
func (m *testValidateSessionServiceManager) SetStatus(_ context.Context, _, _ string, _ service.Status) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *testValidateSessionProxyManager) ReloadAllServicesForAccount(_ context.Context, _ string) error {
|
||||
func (m *testValidateSessionServiceManager) ReloadAllServicesForAccount(_ context.Context, _ string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *testValidateSessionProxyManager) ReloadService(_ context.Context, _, _ string) error {
|
||||
func (m *testValidateSessionServiceManager) ReloadService(_ context.Context, _, _ string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *testValidateSessionProxyManager) GetGlobalServices(ctx context.Context) ([]*reverseproxy.Service, error) {
|
||||
func (m *testValidateSessionServiceManager) GetGlobalServices(ctx context.Context) ([]*service.Service, error) {
|
||||
return m.store.GetServices(ctx, store.LockingStrengthNone)
|
||||
}
|
||||
|
||||
func (m *testValidateSessionProxyManager) GetServiceByID(ctx context.Context, accountID, proxyID string) (*reverseproxy.Service, error) {
|
||||
func (m *testValidateSessionServiceManager) GetServiceByID(ctx context.Context, accountID, proxyID string) (*service.Service, error) {
|
||||
return m.store.GetServiceByID(ctx, store.LockingStrengthNone, accountID, proxyID)
|
||||
}
|
||||
|
||||
func (m *testValidateSessionProxyManager) GetAccountServices(ctx context.Context, accountID string) ([]*reverseproxy.Service, error) {
|
||||
func (m *testValidateSessionServiceManager) GetAccountServices(ctx context.Context, accountID string) ([]*service.Service, error) {
|
||||
return m.store.GetAccountServices(ctx, store.LockingStrengthNone, accountID)
|
||||
}
|
||||
|
||||
func (m *testValidateSessionProxyManager) GetServiceIDByTargetID(_ context.Context, _, _ string) (string, error) {
|
||||
func (m *testValidateSessionServiceManager) GetServiceIDByTargetID(_ context.Context, _, _ string) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (m *testValidateSessionProxyManager) CreateServiceFromPeer(_ context.Context, _, _ string, _ *reverseproxy.ExposeServiceRequest) (*reverseproxy.ExposeServiceResponse, error) {
|
||||
func (m *testValidateSessionServiceManager) CreateServiceFromPeer(_ context.Context, _, _ string, _ *service.ExposeServiceRequest) (*service.ExposeServiceResponse, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (m *testValidateSessionProxyManager) RenewServiceFromPeer(_ context.Context, _, _, _ string) error {
|
||||
func (m *testValidateSessionServiceManager) RenewServiceFromPeer(_ context.Context, _, _, _ string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *testValidateSessionProxyManager) StopServiceFromPeer(_ context.Context, _, _, _ string) error {
|
||||
func (m *testValidateSessionServiceManager) StopServiceFromPeer(_ context.Context, _, _, _ string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *testValidateSessionProxyManager) StartExposeReaper(_ context.Context) {}
|
||||
func (m *testValidateSessionServiceManager) StartExposeReaper(_ context.Context) {}
|
||||
|
||||
type testValidateSessionProxyManager struct{}
|
||||
|
||||
func (m *testValidateSessionProxyManager) Connect(_ context.Context, _, _, _ string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *testValidateSessionProxyManager) Disconnect(_ context.Context, _ string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *testValidateSessionProxyManager) Heartbeat(_ context.Context, _ string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *testValidateSessionProxyManager) GetActiveClusterAddresses(_ context.Context) ([]string, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (m *testValidateSessionProxyManager) CleanupStale(_ context.Context, _ time.Duration) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type testValidateSessionUsersManager struct {
|
||||
store store.Store
|
||||
|
||||
@@ -15,7 +15,7 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service"
|
||||
"github.com/netbirdio/netbird/management/server/job"
|
||||
"github.com/netbirdio/netbird/shared/auth"
|
||||
|
||||
@@ -83,9 +83,9 @@ type DefaultAccountManager struct {
|
||||
|
||||
requestBuffer *AccountRequestBuffer
|
||||
|
||||
proxyController port_forwarding.Controller
|
||||
settingsManager settings.Manager
|
||||
reverseProxyManager reverseproxy.Manager
|
||||
proxyController port_forwarding.Controller
|
||||
settingsManager settings.Manager
|
||||
serviceManager service.Manager
|
||||
|
||||
// config contains the management server configuration
|
||||
config *nbconfig.Config
|
||||
@@ -115,8 +115,8 @@ type DefaultAccountManager struct {
|
||||
|
||||
var _ account.Manager = (*DefaultAccountManager)(nil)
|
||||
|
||||
func (am *DefaultAccountManager) SetServiceManager(serviceManager reverseproxy.Manager) {
|
||||
am.reverseProxyManager = serviceManager
|
||||
func (am *DefaultAccountManager) SetServiceManager(serviceManager service.Manager) {
|
||||
am.serviceManager = serviceManager
|
||||
}
|
||||
|
||||
func isUniqueConstraintError(err error) bool {
|
||||
@@ -395,7 +395,7 @@ func (am *DefaultAccountManager) UpdateAccountSettings(ctx context.Context, acco
|
||||
am.StoreEvent(ctx, userID, accountID, accountID, activity.AccountNetworkRangeUpdated, eventMeta)
|
||||
}
|
||||
if reloadReverseProxy {
|
||||
if err = am.reverseProxyManager.ReloadAllServicesForAccount(ctx, accountID); err != nil {
|
||||
if err = am.serviceManager.ReloadAllServicesForAccount(ctx, accountID); err != nil {
|
||||
log.WithContext(ctx).Warnf("failed to reload all services for account %s: %v", accountID, err)
|
||||
}
|
||||
}
|
||||
@@ -730,7 +730,7 @@ func (am *DefaultAccountManager) DeleteAccount(ctx context.Context, accountID, u
|
||||
return status.Errorf(status.Internal, "failed to build user infos for account %s: %v", accountID, err)
|
||||
}
|
||||
|
||||
err = am.reverseProxyManager.DeleteAllServices(ctx, accountID, userID)
|
||||
err = am.serviceManager.DeleteAllServices(ctx, accountID, userID)
|
||||
if err != nil {
|
||||
return status.Errorf(status.Internal, "failed to delete service %s: %v", accountID, err)
|
||||
}
|
||||
@@ -1379,9 +1379,10 @@ func (am *DefaultAccountManager) GetAccountIDFromUserAuth(ctx context.Context, u
|
||||
if am.singleAccountMode && am.singleAccountModeDomain != "" {
|
||||
// This section is mostly related to self-hosted installations.
|
||||
// We override incoming domain claims to group users under a single account.
|
||||
userAuth.Domain = am.singleAccountModeDomain
|
||||
userAuth.DomainCategory = types.PrivateCategory
|
||||
log.WithContext(ctx).Debugf("overriding JWT Domain and DomainCategory claims since single account mode is enabled")
|
||||
err := am.updateUserAuthWithSingleMode(ctx, &userAuth)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
}
|
||||
|
||||
accountID, err := am.getAccountIDWithAuthorizationClaims(ctx, userAuth)
|
||||
@@ -1414,6 +1415,35 @@ func (am *DefaultAccountManager) GetAccountIDFromUserAuth(ctx context.Context, u
|
||||
return accountID, user.Id, nil
|
||||
}
|
||||
|
||||
// updateUserAuthWithSingleMode modifies the userAuth with the single account domain, or if there is an existing account, with the domain of that account
|
||||
func (am *DefaultAccountManager) updateUserAuthWithSingleMode(ctx context.Context, userAuth *auth.UserAuth) error {
|
||||
userAuth.DomainCategory = types.PrivateCategory
|
||||
userAuth.Domain = am.singleAccountModeDomain
|
||||
|
||||
accountID, err := am.Store.GetAnyAccountID(ctx)
|
||||
if err != nil {
|
||||
if e, ok := status.FromError(err); !ok || e.Type() != status.NotFound {
|
||||
return err
|
||||
}
|
||||
log.WithContext(ctx).Debugf("using singleAccountModeDomain to override JWT Domain and DomainCategory claims in single account mode")
|
||||
return nil
|
||||
}
|
||||
|
||||
if accountID == "" {
|
||||
log.WithContext(ctx).Debugf("using singleAccountModeDomain to override JWT Domain and DomainCategory claims in single account mode")
|
||||
return nil
|
||||
}
|
||||
|
||||
domain, _, err := am.Store.GetAccountDomainAndCategory(ctx, store.LockingStrengthNone, accountID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
userAuth.Domain = domain
|
||||
|
||||
log.WithContext(ctx).Debugf("overriding JWT Domain and DomainCategory claims since single account mode is enabled")
|
||||
return nil
|
||||
}
|
||||
|
||||
// syncJWTGroups processes the JWT groups for a user, updates the account based on the groups,
|
||||
// and propagates changes to peers if group propagation is enabled.
|
||||
// requires userAuth to have been ValidateAndParseToken and EnsureUserAccessByJWTGroups by the AuthManager
|
||||
|
||||
@@ -1,12 +1,14 @@
|
||||
package account
|
||||
|
||||
//go:generate go run github.com/golang/mock/mockgen -package account -destination=manager_mock.go -source=./manager.go -build_flags=-mod=mod
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"net/netip"
|
||||
"time"
|
||||
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service"
|
||||
"github.com/netbirdio/netbird/shared/auth"
|
||||
|
||||
nbdns "github.com/netbirdio/netbird/dns"
|
||||
@@ -61,11 +63,11 @@ type Manager interface {
|
||||
GetPeers(ctx context.Context, accountID, userID, nameFilter, ipFilter string) ([]*nbpeer.Peer, error)
|
||||
MarkPeerConnected(ctx context.Context, peerKey string, connected bool, realIP net.IP, accountID string, syncTime time.Time) error
|
||||
DeletePeer(ctx context.Context, accountID, peerID, userID string) error
|
||||
UpdatePeer(ctx context.Context, accountID, userID string, peer *nbpeer.Peer) (*nbpeer.Peer, error)
|
||||
UpdatePeer(ctx context.Context, accountID, userID string, p *nbpeer.Peer) (*nbpeer.Peer, error)
|
||||
UpdatePeerIP(ctx context.Context, accountID, userID, peerID string, newIP netip.Addr) error
|
||||
GetNetworkMap(ctx context.Context, peerID string) (*types.NetworkMap, error)
|
||||
GetPeerNetwork(ctx context.Context, peerID string) (*types.Network, error)
|
||||
AddPeer(ctx context.Context, accountID, setupKey, userID string, peer *nbpeer.Peer, temporary bool) (*nbpeer.Peer, *types.NetworkMap, []*posture.Checks, error)
|
||||
AddPeer(ctx context.Context, accountID, setupKey, userID string, p *nbpeer.Peer, temporary bool) (*nbpeer.Peer, *types.NetworkMap, []*posture.Checks, error)
|
||||
CreatePAT(ctx context.Context, accountID string, initiatorUserID string, targetUserID string, tokenName string, expiresIn int) (*types.PersonalAccessTokenGenerated, error)
|
||||
DeletePAT(ctx context.Context, accountID string, initiatorUserID string, targetUserID string, tokenID string) error
|
||||
GetPAT(ctx context.Context, accountID string, initiatorUserID string, targetUserID string, tokenID string) (*types.PersonalAccessToken, error)
|
||||
@@ -140,5 +142,5 @@ type Manager interface {
|
||||
CreatePeerJob(ctx context.Context, accountID, peerID, userID string, job *types.Job) error
|
||||
GetAllPeerJobs(ctx context.Context, accountID, userID, peerID string) ([]*types.Job, error)
|
||||
GetPeerJobByID(ctx context.Context, accountID, userID, peerID, jobID string) (*types.Job, error)
|
||||
SetServiceManager(serviceManager reverseproxy.Manager)
|
||||
SetServiceManager(serviceManager service.Manager)
|
||||
}
|
||||
|
||||
1738
management/server/account/manager_mock.go
Normal file
1738
management/server/account/manager_mock.go
Normal file
File diff suppressed because it is too large
Load Diff
@@ -15,10 +15,12 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/golang/mock/gomock"
|
||||
"github.com/netbirdio/netbird/shared/management/status"
|
||||
"github.com/prometheus/client_golang/prometheus/push"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.opentelemetry.io/otel/metric/noop"
|
||||
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
|
||||
|
||||
nbdns "github.com/netbirdio/netbird/dns"
|
||||
@@ -27,8 +29,10 @@ import (
|
||||
"github.com/netbirdio/netbird/management/internals/controllers/network_map/update_channel"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/peers"
|
||||
ephemeral_manager "github.com/netbirdio/netbird/management/internals/modules/peers/ephemeral/manager"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy"
|
||||
reverseproxymanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/manager"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy"
|
||||
proxymanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy/manager"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service"
|
||||
reverseproxymanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service/manager"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/zones"
|
||||
"github.com/netbirdio/netbird/management/internals/server/config"
|
||||
nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc"
|
||||
@@ -1803,12 +1807,12 @@ func TestAccount_Copy(t *testing.T) {
|
||||
Address: "172.12.6.1/24",
|
||||
},
|
||||
},
|
||||
Services: []*reverseproxy.Service{
|
||||
Services: []*service.Service{
|
||||
{
|
||||
ID: "service1",
|
||||
Name: "test-service",
|
||||
AccountID: "account1",
|
||||
Targets: []*reverseproxy.Target{},
|
||||
Targets: []*service.Target{},
|
||||
},
|
||||
},
|
||||
NetworkMapCache: &types.NetworkMapBuilder{},
|
||||
@@ -3113,6 +3117,12 @@ func createManager(t testing.TB) (*DefaultAccountManager, *update_channel.PeersU
|
||||
permissionsManager := permissions.NewManager(store)
|
||||
peersManager := peers.NewManager(store, permissionsManager)
|
||||
|
||||
proxyManager := proxy.NewMockManager(ctrl)
|
||||
proxyManager.EXPECT().
|
||||
CleanupStale(gomock.Any(), gomock.Any()).
|
||||
Return(nil).
|
||||
AnyTimes()
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
updateManager := update_channel.NewPeersUpdateManager(metrics)
|
||||
@@ -3123,8 +3133,12 @@ func createManager(t testing.TB) (*DefaultAccountManager, *update_channel.PeersU
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
proxyGrpcServer := nbgrpc.NewProxyServiceServer(nil, nil, nbgrpc.ProxyOIDCConfig{}, peersManager, nil)
|
||||
manager.SetServiceManager(reverseproxymanager.NewManager(store, manager, permissionsManager, settingsMockManager, proxyGrpcServer, nil))
|
||||
proxyGrpcServer := nbgrpc.NewProxyServiceServer(nil, nil, nil, nbgrpc.ProxyOIDCConfig{}, peersManager, nil, proxyManager)
|
||||
proxyController, err := proxymanager.NewGRPCController(proxyGrpcServer, noop.Meter{})
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
manager.SetServiceManager(reverseproxymanager.NewManager(store, manager, permissionsManager, proxyController, nil))
|
||||
|
||||
return manager, updateManager, nil
|
||||
}
|
||||
@@ -3953,3 +3967,116 @@ func TestDefaultAccountManager_UpdateAccountSettings_NetworkRangeChange(t *testi
|
||||
t.Fatal("UpdateAccountSettings deadlocked when changing NetworkRange")
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateUserAuthWithSingleMode(t *testing.T) {
|
||||
t.Run("sets defaults and overrides domain from store", func(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
t.Cleanup(ctrl.Finish)
|
||||
|
||||
mockStore := store.NewMockStore(ctrl)
|
||||
mockStore.EXPECT().
|
||||
GetAnyAccountID(gomock.Any()).
|
||||
Return("account-1", nil)
|
||||
mockStore.EXPECT().
|
||||
GetAccountDomainAndCategory(gomock.Any(), store.LockingStrengthNone, "account-1").
|
||||
Return("real-domain.com", "private", nil)
|
||||
|
||||
am := &DefaultAccountManager{
|
||||
Store: mockStore,
|
||||
singleAccountModeDomain: "fallback.com",
|
||||
}
|
||||
|
||||
userAuth := &auth.UserAuth{}
|
||||
err := am.updateUserAuthWithSingleMode(context.Background(), userAuth)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "real-domain.com", userAuth.Domain)
|
||||
assert.Equal(t, types.PrivateCategory, userAuth.DomainCategory)
|
||||
})
|
||||
|
||||
t.Run("falls back to singleAccountModeDomain when account ID is empty", func(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
t.Cleanup(ctrl.Finish)
|
||||
|
||||
mockStore := store.NewMockStore(ctrl)
|
||||
mockStore.EXPECT().
|
||||
GetAnyAccountID(gomock.Any()).
|
||||
Return("", nil)
|
||||
|
||||
am := &DefaultAccountManager{
|
||||
Store: mockStore,
|
||||
singleAccountModeDomain: "fallback.com",
|
||||
}
|
||||
|
||||
userAuth := &auth.UserAuth{}
|
||||
err := am.updateUserAuthWithSingleMode(context.Background(), userAuth)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "fallback.com", userAuth.Domain)
|
||||
assert.Equal(t, types.PrivateCategory, userAuth.DomainCategory)
|
||||
})
|
||||
|
||||
t.Run("falls back to singleAccountModeDomain on NotFound error", func(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
t.Cleanup(ctrl.Finish)
|
||||
|
||||
mockStore := store.NewMockStore(ctrl)
|
||||
mockStore.EXPECT().
|
||||
GetAnyAccountID(gomock.Any()).
|
||||
Return("", status.Errorf(status.NotFound, "no accounts"))
|
||||
|
||||
am := &DefaultAccountManager{
|
||||
Store: mockStore,
|
||||
singleAccountModeDomain: "fallback.com",
|
||||
}
|
||||
|
||||
userAuth := &auth.UserAuth{}
|
||||
err := am.updateUserAuthWithSingleMode(context.Background(), userAuth)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "fallback.com", userAuth.Domain)
|
||||
assert.Equal(t, types.PrivateCategory, userAuth.DomainCategory)
|
||||
})
|
||||
|
||||
t.Run("propagates non-NotFound error from GetAnyAccountID", func(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
t.Cleanup(ctrl.Finish)
|
||||
|
||||
mockStore := store.NewMockStore(ctrl)
|
||||
mockStore.EXPECT().
|
||||
GetAnyAccountID(gomock.Any()).
|
||||
Return("", status.Errorf(status.Internal, "db down"))
|
||||
|
||||
am := &DefaultAccountManager{
|
||||
Store: mockStore,
|
||||
singleAccountModeDomain: "fallback.com",
|
||||
}
|
||||
|
||||
userAuth := &auth.UserAuth{}
|
||||
err := am.updateUserAuthWithSingleMode(context.Background(), userAuth)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "db down")
|
||||
// Defaults should still be set before error path
|
||||
assert.Equal(t, types.PrivateCategory, userAuth.DomainCategory)
|
||||
})
|
||||
|
||||
t.Run("propagates error from GetAccountDomainAndCategory", func(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
t.Cleanup(ctrl.Finish)
|
||||
|
||||
mockStore := store.NewMockStore(ctrl)
|
||||
mockStore.EXPECT().
|
||||
GetAnyAccountID(gomock.Any()).
|
||||
Return("account-1", nil)
|
||||
mockStore.EXPECT().
|
||||
GetAccountDomainAndCategory(gomock.Any(), store.LockingStrengthNone, "account-1").
|
||||
Return("", "", status.Errorf(status.Internal, "query failed"))
|
||||
|
||||
am := &DefaultAccountManager{
|
||||
Store: mockStore,
|
||||
singleAccountModeDomain: "fallback.com",
|
||||
}
|
||||
|
||||
userAuth := &auth.UserAuth{}
|
||||
err := am.updateUserAuthWithSingleMode(context.Background(), userAuth)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "query failed")
|
||||
})
|
||||
}
|
||||
|
||||
@@ -220,6 +220,13 @@ const (
|
||||
// AccountPeerExposeDisabled indicates that a user disabled peer expose for the account
|
||||
AccountPeerExposeDisabled Activity = 115
|
||||
|
||||
// DomainAdded indicates that a user added a custom domain
|
||||
DomainAdded Activity = 118
|
||||
// DomainDeleted indicates that a user deleted a custom domain
|
||||
DomainDeleted Activity = 119
|
||||
// DomainValidated indicates that a custom domain was validated
|
||||
DomainValidated Activity = 120
|
||||
|
||||
AccountDeleted Activity = 99999
|
||||
)
|
||||
|
||||
@@ -364,6 +371,10 @@ var activityMap = map[Activity]Code{
|
||||
|
||||
AccountPeerExposeEnabled: {"Account peer expose enabled", "account.setting.peer.expose.enable"},
|
||||
AccountPeerExposeDisabled: {"Account peer expose disabled", "account.setting.peer.expose.disable"},
|
||||
|
||||
DomainAdded: {"Domain added", "domain.add"},
|
||||
DomainDeleted: {"Domain deleted", "domain.delete"},
|
||||
DomainValidated: {"Domain validated", "domain.validate"},
|
||||
}
|
||||
|
||||
// StringCode returns a string code of the activity
|
||||
|
||||
@@ -249,7 +249,15 @@ func initDatabase(ctx context.Context, dataDir string) (*gorm.DB, error) {
|
||||
|
||||
switch storeEngine {
|
||||
case types.SqliteStoreEngine:
|
||||
dialector = sqlite.Open(filepath.Join(dataDir, eventSinkDB))
|
||||
dbFile := eventSinkDB
|
||||
if envFile, ok := os.LookupEnv("NB_ACTIVITY_EVENT_SQLITE_FILE"); ok && envFile != "" {
|
||||
dbFile = envFile
|
||||
}
|
||||
connStr := dbFile
|
||||
if !filepath.IsAbs(dbFile) {
|
||||
connStr = filepath.Join(dataDir, dbFile)
|
||||
}
|
||||
dialector = sqlite.Open(connStr)
|
||||
case types.PostgresStoreEngine:
|
||||
dsn, ok := os.LookupEnv(postgresDsnEnv)
|
||||
if !ok {
|
||||
|
||||
@@ -425,6 +425,11 @@ func (am *DefaultAccountManager) DeleteGroups(ctx context.Context, accountID, us
|
||||
var groupIDsToDelete []string
|
||||
var deletedGroups []*types.Group
|
||||
|
||||
extraSettings, err := am.settingsManager.GetExtraSettings(ctx, accountID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = am.Store.ExecuteInTransaction(ctx, func(transaction store.Store) error {
|
||||
for _, groupID := range groupIDs {
|
||||
group, err := transaction.GetGroupByID(ctx, store.LockingStrengthNone, accountID, groupID)
|
||||
@@ -433,7 +438,7 @@ func (am *DefaultAccountManager) DeleteGroups(ctx context.Context, accountID, us
|
||||
continue
|
||||
}
|
||||
|
||||
if err := validateDeleteGroup(ctx, transaction, group, userID); err != nil {
|
||||
if err = validateDeleteGroup(ctx, transaction, group, userID, extraSettings.FlowGroups); err != nil {
|
||||
allErrors = errors.Join(allErrors, err)
|
||||
continue
|
||||
}
|
||||
@@ -621,7 +626,7 @@ func validateNewGroup(ctx context.Context, transaction store.Store, accountID st
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateDeleteGroup(ctx context.Context, transaction store.Store, group *types.Group, userID string) error {
|
||||
func validateDeleteGroup(ctx context.Context, transaction store.Store, group *types.Group, userID string, flowGroups []string) error {
|
||||
// disable a deleting integration group if the initiator is not an admin service user
|
||||
if group.Issued == types.GroupIssuedIntegration {
|
||||
executingUser, err := transaction.GetUserByUserID(ctx, store.LockingStrengthNone, userID)
|
||||
@@ -641,6 +646,10 @@ func validateDeleteGroup(ctx context.Context, transaction store.Store, group *ty
|
||||
return &GroupLinkError{"network resource", group.Resources[0].ID}
|
||||
}
|
||||
|
||||
if slices.Contains(flowGroups, group.ID) {
|
||||
return &GroupLinkError{"settings", "traffic event logging"}
|
||||
}
|
||||
|
||||
if isLinked, linkedRoute := isGroupLinkedToRoute(ctx, transaction, group.AccountID, group.ID); isLinked {
|
||||
return &GroupLinkError{"route", string(linkedRoute.NetID)}
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/golang/mock/gomock"
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
@@ -26,6 +27,7 @@ import (
|
||||
networkTypes "github.com/netbirdio/netbird/management/server/networks/types"
|
||||
peer2 "github.com/netbirdio/netbird/management/server/peer"
|
||||
"github.com/netbirdio/netbird/management/server/permissions"
|
||||
"github.com/netbirdio/netbird/management/server/settings"
|
||||
"github.com/netbirdio/netbird/management/server/store"
|
||||
"github.com/netbirdio/netbird/management/server/types"
|
||||
"github.com/netbirdio/netbird/route"
|
||||
@@ -284,6 +286,67 @@ func TestDefaultAccountManager_DeleteGroups(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestDefaultAccountManager_DeleteGroupLinkedToFlowGroup(t *testing.T) {
|
||||
am, _, err := createManager(t)
|
||||
require.NoError(t, err)
|
||||
|
||||
ctrl := gomock.NewController(t)
|
||||
settingsMock := settings.NewMockManager(ctrl)
|
||||
settingsMock.EXPECT().
|
||||
GetExtraSettings(gomock.Any(), gomock.Any()).
|
||||
Return(&types.ExtraSettings{FlowGroups: []string{"grp-for-flow"}}, nil).
|
||||
AnyTimes()
|
||||
settingsMock.EXPECT().
|
||||
UpdateExtraSettings(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
|
||||
Return(false, nil).
|
||||
AnyTimes()
|
||||
am.settingsManager = settingsMock
|
||||
|
||||
_, account, err := initTestGroupAccount(am)
|
||||
require.NoError(t, err)
|
||||
|
||||
grp := &types.Group{
|
||||
ID: "grp-for-flow",
|
||||
AccountID: account.Id,
|
||||
Name: "Group for flow",
|
||||
Issued: types.GroupIssuedAPI,
|
||||
Peers: make([]string, 0),
|
||||
}
|
||||
require.NoError(t, am.CreateGroup(context.Background(), account.Id, groupAdminUserID, grp))
|
||||
|
||||
err = am.DeleteGroup(context.Background(), account.Id, groupAdminUserID, "grp-for-flow")
|
||||
require.Error(t, err)
|
||||
|
||||
var gErr *GroupLinkError
|
||||
require.ErrorAs(t, err, &gErr)
|
||||
assert.Equal(t, "settings", gErr.Resource)
|
||||
assert.Equal(t, "traffic event logging", gErr.Name)
|
||||
|
||||
group, err := am.GetGroup(context.Background(), account.Id, "grp-for-flow", groupAdminUserID)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, group)
|
||||
|
||||
regularGrp := &types.Group{
|
||||
ID: "grp-regular",
|
||||
AccountID: account.Id,
|
||||
Name: "Regular group",
|
||||
Issued: types.GroupIssuedAPI,
|
||||
Peers: make([]string, 0),
|
||||
}
|
||||
err = am.CreateGroup(context.Background(), account.Id, groupAdminUserID, regularGrp)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = am.DeleteGroups(context.Background(), account.Id, groupAdminUserID, []string{"grp-for-flow", "grp-regular"})
|
||||
require.Error(t, err)
|
||||
|
||||
group, err = am.GetGroup(context.Background(), account.Id, "grp-for-flow", groupAdminUserID)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, group)
|
||||
|
||||
_, err = am.GetGroup(context.Background(), account.Id, "grp-regular", groupAdminUserID)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func initTestGroupAccount(am *DefaultAccountManager) (*DefaultAccountManager, *types.Account, error) {
|
||||
accountID := "testingAcc"
|
||||
domain := "example.com"
|
||||
@@ -703,7 +766,7 @@ func TestGroupAccountPeersUpdate(t *testing.T) {
|
||||
t.Run("saving group linked to network router", func(t *testing.T) {
|
||||
permissionsManager := permissions.NewManager(manager.Store)
|
||||
groupsManager := groups.NewManager(manager.Store, permissionsManager, manager)
|
||||
resourcesManager := resources.NewManager(manager.Store, permissionsManager, groupsManager, manager, manager.reverseProxyManager)
|
||||
resourcesManager := resources.NewManager(manager.Store, permissionsManager, groupsManager, manager, manager.serviceManager)
|
||||
routersManager := routers.NewManager(manager.Store, permissionsManager, manager)
|
||||
networksManager := networks.NewManager(manager.Store, permissionsManager, resourcesManager, routersManager, manager)
|
||||
|
||||
|
||||
@@ -17,9 +17,9 @@ import (
|
||||
|
||||
"github.com/netbirdio/netbird/management/server/types"
|
||||
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs"
|
||||
reverseproxymanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/manager"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service"
|
||||
reverseproxymanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service/manager"
|
||||
|
||||
nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc"
|
||||
idpmanager "github.com/netbirdio/netbird/management/server/idp"
|
||||
@@ -73,7 +73,7 @@ const (
|
||||
)
|
||||
|
||||
// NewAPIHandler creates the Management service HTTP API handler registering all the available endpoints.
|
||||
func NewAPIHandler(ctx context.Context, accountManager account.Manager, networksManager nbnetworks.Manager, resourceManager resources.Manager, routerManager routers.Manager, groupsManager nbgroups.Manager, LocationManager geolocation.Geolocation, authManager auth.Manager, appMetrics telemetry.AppMetrics, integratedValidator integrated_validator.IntegratedValidator, proxyController port_forwarding.Controller, permissionsManager permissions.Manager, peersManager nbpeers.Manager, settingsManager settings.Manager, zManager zones.Manager, rManager records.Manager, networkMapController network_map.Controller, idpManager idpmanager.Manager, reverseProxyManager reverseproxy.Manager, reverseProxyDomainManager *manager.Manager, reverseProxyAccessLogsManager accesslogs.Manager, proxyGRPCServer *nbgrpc.ProxyServiceServer, trustedHTTPProxies []netip.Prefix) (http.Handler, error) {
|
||||
func NewAPIHandler(ctx context.Context, accountManager account.Manager, networksManager nbnetworks.Manager, resourceManager resources.Manager, routerManager routers.Manager, groupsManager nbgroups.Manager, LocationManager geolocation.Geolocation, authManager auth.Manager, appMetrics telemetry.AppMetrics, integratedValidator integrated_validator.IntegratedValidator, proxyController port_forwarding.Controller, permissionsManager permissions.Manager, peersManager nbpeers.Manager, settingsManager settings.Manager, zManager zones.Manager, rManager records.Manager, networkMapController network_map.Controller, idpManager idpmanager.Manager, serviceManager service.Manager, reverseProxyDomainManager *manager.Manager, reverseProxyAccessLogsManager accesslogs.Manager, proxyGRPCServer *nbgrpc.ProxyServiceServer, trustedHTTPProxies []netip.Prefix) (http.Handler, error) {
|
||||
|
||||
// Register bypass paths for unauthenticated endpoints
|
||||
if err := bypass.AddBypassPath("/api/instance"); err != nil {
|
||||
@@ -173,8 +173,8 @@ func NewAPIHandler(ctx context.Context, accountManager account.Manager, networks
|
||||
idp.AddEndpoints(accountManager, router)
|
||||
instance.AddEndpoints(instanceManager, router)
|
||||
instance.AddVersionEndpoint(instanceManager, router)
|
||||
if reverseProxyManager != nil && reverseProxyDomainManager != nil {
|
||||
reverseproxymanager.RegisterEndpoints(reverseProxyManager, *reverseProxyDomainManager, reverseProxyAccessLogsManager, router)
|
||||
if serviceManager != nil && reverseProxyDomainManager != nil {
|
||||
reverseproxymanager.RegisterEndpoints(serviceManager, *reverseProxyDomainManager, reverseProxyAccessLogsManager, router)
|
||||
}
|
||||
|
||||
// Register OAuth callback handler for proxy authentication
|
||||
|
||||
@@ -18,8 +18,8 @@ import (
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service"
|
||||
nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc"
|
||||
"github.com/netbirdio/netbird/management/server/store"
|
||||
"github.com/netbirdio/netbird/management/server/types"
|
||||
@@ -190,7 +190,11 @@ func setupAuthCallbackTest(t *testing.T) *testSetup {
|
||||
|
||||
oidcServer := newFakeOIDCServer()
|
||||
|
||||
tokenStore := nbgrpc.NewOneTimeTokenStore(time.Minute)
|
||||
tokenStore, err := nbgrpc.NewOneTimeTokenStore(ctx, time.Minute, 10*time.Minute, 100)
|
||||
require.NoError(t, err)
|
||||
|
||||
pkceStore, err := nbgrpc.NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100)
|
||||
require.NoError(t, err)
|
||||
|
||||
usersManager := users.NewManager(testStore)
|
||||
|
||||
@@ -205,12 +209,14 @@ func setupAuthCallbackTest(t *testing.T) *testSetup {
|
||||
proxyService := nbgrpc.NewProxyServiceServer(
|
||||
&testAccessLogManager{},
|
||||
tokenStore,
|
||||
pkceStore,
|
||||
oidcConfig,
|
||||
nil,
|
||||
usersManager,
|
||||
nil,
|
||||
)
|
||||
|
||||
proxyService.SetProxyManager(&testServiceManager{store: testStore})
|
||||
proxyService.SetServiceManager(&testServiceManager{store: testStore})
|
||||
|
||||
handler := NewAuthCallbackHandler(proxyService, nil)
|
||||
|
||||
@@ -239,12 +245,12 @@ func createTestReverseProxies(t *testing.T, ctx context.Context, testStore store
|
||||
pubKey := base64.StdEncoding.EncodeToString(pub)
|
||||
privKey := base64.StdEncoding.EncodeToString(priv)
|
||||
|
||||
testProxy := &reverseproxy.Service{
|
||||
testProxy := &service.Service{
|
||||
ID: "testProxyId",
|
||||
AccountID: "testAccountId",
|
||||
Name: "Test Proxy",
|
||||
Domain: "test-proxy.example.com",
|
||||
Targets: []*reverseproxy.Target{{
|
||||
Targets: []*service.Target{{
|
||||
Path: strPtr("/"),
|
||||
Host: "localhost",
|
||||
Port: 8080,
|
||||
@@ -254,8 +260,8 @@ func createTestReverseProxies(t *testing.T, ctx context.Context, testStore store
|
||||
Enabled: true,
|
||||
}},
|
||||
Enabled: true,
|
||||
Auth: reverseproxy.AuthConfig{
|
||||
BearerAuth: &reverseproxy.BearerAuthConfig{
|
||||
Auth: service.AuthConfig{
|
||||
BearerAuth: &service.BearerAuthConfig{
|
||||
Enabled: true,
|
||||
DistributionGroups: []string{"allowedGroupId"},
|
||||
},
|
||||
@@ -265,12 +271,12 @@ func createTestReverseProxies(t *testing.T, ctx context.Context, testStore store
|
||||
}
|
||||
require.NoError(t, testStore.CreateService(ctx, testProxy))
|
||||
|
||||
restrictedProxy := &reverseproxy.Service{
|
||||
restrictedProxy := &service.Service{
|
||||
ID: "restrictedProxyId",
|
||||
AccountID: "testAccountId",
|
||||
Name: "Restricted Proxy",
|
||||
Domain: "restricted-proxy.example.com",
|
||||
Targets: []*reverseproxy.Target{{
|
||||
Targets: []*service.Target{{
|
||||
Path: strPtr("/"),
|
||||
Host: "localhost",
|
||||
Port: 8080,
|
||||
@@ -280,8 +286,8 @@ func createTestReverseProxies(t *testing.T, ctx context.Context, testStore store
|
||||
Enabled: true,
|
||||
}},
|
||||
Enabled: true,
|
||||
Auth: reverseproxy.AuthConfig{
|
||||
BearerAuth: &reverseproxy.BearerAuthConfig{
|
||||
Auth: service.AuthConfig{
|
||||
BearerAuth: &service.BearerAuthConfig{
|
||||
Enabled: true,
|
||||
DistributionGroups: []string{"restrictedGroupId"},
|
||||
},
|
||||
@@ -291,12 +297,12 @@ func createTestReverseProxies(t *testing.T, ctx context.Context, testStore store
|
||||
}
|
||||
require.NoError(t, testStore.CreateService(ctx, restrictedProxy))
|
||||
|
||||
noAuthProxy := &reverseproxy.Service{
|
||||
noAuthProxy := &service.Service{
|
||||
ID: "noAuthProxyId",
|
||||
AccountID: "testAccountId",
|
||||
Name: "No Auth Proxy",
|
||||
Domain: "no-auth-proxy.example.com",
|
||||
Targets: []*reverseproxy.Target{{
|
||||
Targets: []*service.Target{{
|
||||
Path: strPtr("/"),
|
||||
Host: "localhost",
|
||||
Port: 8080,
|
||||
@@ -306,8 +312,8 @@ func createTestReverseProxies(t *testing.T, ctx context.Context, testStore store
|
||||
Enabled: true,
|
||||
}},
|
||||
Enabled: true,
|
||||
Auth: reverseproxy.AuthConfig{
|
||||
BearerAuth: &reverseproxy.BearerAuthConfig{
|
||||
Auth: service.AuthConfig{
|
||||
BearerAuth: &service.BearerAuthConfig{
|
||||
Enabled: false,
|
||||
},
|
||||
},
|
||||
@@ -361,19 +367,19 @@ func (m *testServiceManager) DeleteAllServices(ctx context.Context, accountID, u
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *testServiceManager) GetAllServices(_ context.Context, _, _ string) ([]*reverseproxy.Service, error) {
|
||||
func (m *testServiceManager) GetAllServices(_ context.Context, _, _ string) ([]*service.Service, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (m *testServiceManager) GetService(_ context.Context, _, _, _ string) (*reverseproxy.Service, error) {
|
||||
func (m *testServiceManager) GetService(_ context.Context, _, _, _ string) (*service.Service, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (m *testServiceManager) CreateService(_ context.Context, _, _ string, _ *reverseproxy.Service) (*reverseproxy.Service, error) {
|
||||
func (m *testServiceManager) CreateService(_ context.Context, _, _ string, _ *service.Service) (*service.Service, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (m *testServiceManager) UpdateService(_ context.Context, _, _ string, _ *reverseproxy.Service) (*reverseproxy.Service, error) {
|
||||
func (m *testServiceManager) UpdateService(_ context.Context, _, _ string, _ *service.Service) (*service.Service, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
@@ -385,7 +391,7 @@ func (m *testServiceManager) SetCertificateIssuedAt(_ context.Context, _, _ stri
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *testServiceManager) SetStatus(_ context.Context, _, _ string, _ reverseproxy.ProxyStatus) error {
|
||||
func (m *testServiceManager) SetStatus(_ context.Context, _, _ string, _ service.Status) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -397,15 +403,15 @@ func (m *testServiceManager) ReloadService(_ context.Context, _, _ string) error
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *testServiceManager) GetGlobalServices(ctx context.Context) ([]*reverseproxy.Service, error) {
|
||||
func (m *testServiceManager) GetGlobalServices(ctx context.Context) ([]*service.Service, error) {
|
||||
return m.store.GetServices(ctx, store.LockingStrengthNone)
|
||||
}
|
||||
|
||||
func (m *testServiceManager) GetServiceByID(ctx context.Context, accountID, proxyID string) (*reverseproxy.Service, error) {
|
||||
func (m *testServiceManager) GetServiceByID(ctx context.Context, accountID, proxyID string) (*service.Service, error) {
|
||||
return m.store.GetServiceByID(ctx, store.LockingStrengthNone, accountID, proxyID)
|
||||
}
|
||||
|
||||
func (m *testServiceManager) GetAccountServices(ctx context.Context, accountID string) ([]*reverseproxy.Service, error) {
|
||||
func (m *testServiceManager) GetAccountServices(ctx context.Context, accountID string) ([]*service.Service, error) {
|
||||
return m.store.GetAccountServices(ctx, store.LockingStrengthNone, accountID)
|
||||
}
|
||||
|
||||
@@ -413,7 +419,7 @@ func (m *testServiceManager) GetServiceIDByTargetID(_ context.Context, _, _ stri
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (m *testServiceManager) CreateServiceFromPeer(_ context.Context, _, _ string, _ *reverseproxy.ExposeServiceRequest) (*reverseproxy.ExposeServiceResponse, error) {
|
||||
func (m *testServiceManager) CreateServiceFromPeer(_ context.Context, _, _ string, _ *service.ExposeServiceRequest) (*service.ExposeServiceResponse, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -9,10 +9,13 @@ import (
|
||||
"github.com/golang-jwt/jwt/v5"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"go.opentelemetry.io/otel/metric/noop"
|
||||
|
||||
"github.com/netbirdio/management-integrations/integrations"
|
||||
accesslogsmanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs/manager"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain/manager"
|
||||
reverseproxymanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/manager"
|
||||
proxymanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy/manager"
|
||||
reverseproxymanager "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service/manager"
|
||||
nbgrpc "github.com/netbirdio/netbird/management/internals/shared/grpc"
|
||||
|
||||
zonesManager "github.com/netbirdio/netbird/management/internals/modules/zones/manager"
|
||||
@@ -91,12 +94,28 @@ func BuildApiBlackBoxWithDBState(t testing_tools.TB, sqlFile string, expectedPee
|
||||
}
|
||||
|
||||
accessLogsManager := accesslogsmanager.NewManager(store, permissionsManager, nil)
|
||||
proxyTokenStore := nbgrpc.NewOneTimeTokenStore(1 * time.Minute)
|
||||
proxyServiceServer := nbgrpc.NewProxyServiceServer(accessLogsManager, proxyTokenStore, nbgrpc.ProxyOIDCConfig{}, peersManager, userManager)
|
||||
domainManager := manager.NewManager(store, proxyServiceServer, permissionsManager)
|
||||
reverseProxyManager := reverseproxymanager.NewManager(store, am, permissionsManager, settingsManager, proxyServiceServer, domainManager)
|
||||
proxyServiceServer.SetProxyManager(reverseProxyManager)
|
||||
am.SetServiceManager(reverseProxyManager)
|
||||
proxyTokenStore, err := nbgrpc.NewOneTimeTokenStore(ctx, 5*time.Minute, 10*time.Minute, 100)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create proxy token store: %v", err)
|
||||
}
|
||||
pkceverifierStore, err := nbgrpc.NewPKCEVerifierStore(ctx, 10*time.Minute, 10*time.Minute, 100)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create PKCE verifier store: %v", err)
|
||||
}
|
||||
noopMeter := noop.NewMeterProvider().Meter("")
|
||||
proxyMgr, err := proxymanager.NewManager(store, noopMeter)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create proxy manager: %v", err)
|
||||
}
|
||||
proxyServiceServer := nbgrpc.NewProxyServiceServer(accessLogsManager, proxyTokenStore, pkceverifierStore, nbgrpc.ProxyOIDCConfig{}, peersManager, userManager, proxyMgr)
|
||||
domainManager := manager.NewManager(store, proxyMgr, permissionsManager, am)
|
||||
serviceProxyController, err := proxymanager.NewGRPCController(proxyServiceServer, noopMeter)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create proxy controller: %v", err)
|
||||
}
|
||||
serviceManager := reverseproxymanager.NewManager(store, am, permissionsManager, serviceProxyController, domainManager)
|
||||
proxyServiceServer.SetServiceManager(serviceManager)
|
||||
am.SetServiceManager(serviceManager)
|
||||
|
||||
// @note this is required so that PAT's validate from store, but JWT's are mocked
|
||||
authManager := serverauth.NewManager(store, "", "", "", "", []string{}, false)
|
||||
@@ -114,7 +133,7 @@ func BuildApiBlackBoxWithDBState(t testing_tools.TB, sqlFile string, expectedPee
|
||||
customZonesManager := zonesManager.NewManager(store, am, permissionsManager, "")
|
||||
zoneRecordsManager := recordsManager.NewManager(store, am, permissionsManager)
|
||||
|
||||
apiHandler, err := http2.NewAPIHandler(context.Background(), am, networksManagerMock, resourcesManagerMock, routersManagerMock, groupsManagerMock, geoMock, authManagerMock, metrics, validatorMock, proxyController, permissionsManager, peersManager, settingsManager, customZonesManager, zoneRecordsManager, networkMapController, nil, reverseProxyManager, nil, nil, nil, nil)
|
||||
apiHandler, err := http2.NewAPIHandler(context.Background(), am, networksManagerMock, resourcesManagerMock, routersManagerMock, groupsManagerMock, geoMock, authManagerMock, metrics, validatorMock, proxyController, permissionsManager, peersManager, settingsManager, customZonesManager, zoneRecordsManager, networkMapController, nil, serviceManager, nil, nil, nil, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create API handler: %v", err)
|
||||
}
|
||||
|
||||
@@ -52,7 +52,7 @@ type EmbeddedIdPConfig struct {
|
||||
|
||||
// EmbeddedStorageConfig holds storage configuration for the embedded IdP.
|
||||
type EmbeddedStorageConfig struct {
|
||||
// Type is the storage type (currently only "sqlite3" is supported)
|
||||
// Type is the storage type: "sqlite3" (default) or "postgres"
|
||||
Type string
|
||||
// Config contains type-specific configuration
|
||||
Config EmbeddedStorageTypeConfig
|
||||
@@ -62,6 +62,8 @@ type EmbeddedStorageConfig struct {
|
||||
type EmbeddedStorageTypeConfig struct {
|
||||
// File is the path to the SQLite database file (for sqlite3 type)
|
||||
File string
|
||||
// DSN is the connection string for postgres
|
||||
DSN string
|
||||
}
|
||||
|
||||
// OwnerConfig represents the initial owner/admin user for the embedded IdP.
|
||||
@@ -74,6 +76,22 @@ type OwnerConfig struct {
|
||||
Username string
|
||||
}
|
||||
|
||||
// buildIdpStorageConfig builds the Dex storage config map based on the storage type.
|
||||
func buildIdpStorageConfig(storageType string, cfg EmbeddedStorageTypeConfig) (map[string]interface{}, error) {
|
||||
switch storageType {
|
||||
case "sqlite3":
|
||||
return map[string]interface{}{
|
||||
"file": cfg.File,
|
||||
}, nil
|
||||
case "postgres":
|
||||
return map[string]interface{}{
|
||||
"dsn": cfg.DSN,
|
||||
}, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported IdP storage type: %s", storageType)
|
||||
}
|
||||
}
|
||||
|
||||
// ToYAMLConfig converts EmbeddedIdPConfig to dex.YAMLConfig.
|
||||
func (c *EmbeddedIdPConfig) ToYAMLConfig() (*dex.YAMLConfig, error) {
|
||||
if c.Issuer == "" {
|
||||
@@ -85,6 +103,14 @@ func (c *EmbeddedIdPConfig) ToYAMLConfig() (*dex.YAMLConfig, error) {
|
||||
if c.Storage.Type == "sqlite3" && c.Storage.Config.File == "" {
|
||||
return nil, fmt.Errorf("storage file is required for sqlite3")
|
||||
}
|
||||
if c.Storage.Type == "postgres" && c.Storage.Config.DSN == "" {
|
||||
return nil, fmt.Errorf("storage DSN is required for postgres")
|
||||
}
|
||||
|
||||
storageConfig, err := buildIdpStorageConfig(c.Storage.Type, c.Storage.Config)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid IdP storage config: %w", err)
|
||||
}
|
||||
|
||||
// Build CLI redirect URIs including the device callback (both relative and absolute)
|
||||
cliRedirectURIs := c.CLIRedirectURIs
|
||||
@@ -100,10 +126,8 @@ func (c *EmbeddedIdPConfig) ToYAMLConfig() (*dex.YAMLConfig, error) {
|
||||
cfg := &dex.YAMLConfig{
|
||||
Issuer: c.Issuer,
|
||||
Storage: dex.Storage{
|
||||
Type: c.Storage.Type,
|
||||
Config: map[string]interface{}{
|
||||
"file": c.Storage.Config.File,
|
||||
},
|
||||
Type: c.Storage.Type,
|
||||
Config: storageConfig,
|
||||
},
|
||||
Web: dex.Web{
|
||||
AllowedOrigins: []string{"*"},
|
||||
|
||||
@@ -14,7 +14,7 @@ import (
|
||||
|
||||
"github.com/hashicorp/go-version"
|
||||
"github.com/netbirdio/netbird/idp/dex"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy"
|
||||
rpservice "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/netbirdio/netbird/management/server/types"
|
||||
@@ -294,9 +294,9 @@ func (w *Worker) generateProperties(ctx context.Context) properties {
|
||||
localUsers++
|
||||
} else {
|
||||
idpUsers++
|
||||
idpType := extractIdpType(idpID)
|
||||
embeddedIdpTypes[idpType]++
|
||||
}
|
||||
idpType := extractIdpType(idpID)
|
||||
embeddedIdpTypes[idpType]++
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -358,12 +358,12 @@ func (w *Worker) generateProperties(ctx context.Context) properties {
|
||||
}
|
||||
servicesTargets += len(service.Targets)
|
||||
|
||||
switch reverseproxy.ProxyStatus(service.Meta.Status) {
|
||||
case reverseproxy.StatusActive:
|
||||
switch rpservice.Status(service.Meta.Status) {
|
||||
case rpservice.StatusActive:
|
||||
servicesStatusActive++
|
||||
case reverseproxy.StatusPending:
|
||||
case rpservice.StatusPending:
|
||||
servicesStatusPending++
|
||||
case reverseproxy.StatusError, reverseproxy.StatusCertificateFailed, reverseproxy.StatusTunnelNotCreated:
|
||||
case rpservice.StatusError, rpservice.StatusCertificateFailed, rpservice.StatusTunnelNotCreated:
|
||||
servicesStatusError++
|
||||
}
|
||||
|
||||
@@ -531,6 +531,9 @@ func createPostRequest(ctx context.Context, endpoint string, payloadStr string)
|
||||
// Connector IDs are formatted as "<type>-<xid>" (e.g., "okta-abc123", "zitadel-xyz").
|
||||
// Returns the type prefix, or "oidc" if no known prefix is found.
|
||||
func extractIdpType(connectorID string) string {
|
||||
if connectorID == "local" {
|
||||
return "local"
|
||||
}
|
||||
idx := strings.LastIndex(connectorID, "-")
|
||||
if idx <= 0 {
|
||||
return "oidc"
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
|
||||
nbdns "github.com/netbirdio/netbird/dns"
|
||||
"github.com/netbirdio/netbird/idp/dex"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy"
|
||||
rpservice "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service"
|
||||
resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types"
|
||||
routerTypes "github.com/netbirdio/netbird/management/server/networks/routers/types"
|
||||
networkTypes "github.com/netbirdio/netbird/management/server/networks/types"
|
||||
@@ -29,6 +29,7 @@ func (mockDatasource) GetAllConnectedPeers() map[string]struct{} {
|
||||
func (mockDatasource) GetAllAccounts(_ context.Context) []*types.Account {
|
||||
localUserID := dex.EncodeDexUserID("10", "local")
|
||||
idpUserID := dex.EncodeDexUserID("20", "zitadel-d5uv82dra0haedlf6kv0")
|
||||
oidcUserID := dex.EncodeDexUserID("30", "d6jvvp69kmnc73c9pl40")
|
||||
return []*types.Account{
|
||||
{
|
||||
Id: "1",
|
||||
@@ -116,29 +117,29 @@ func (mockDatasource) GetAllAccounts(_ context.Context) []*types.Account {
|
||||
},
|
||||
},
|
||||
},
|
||||
Services: []*reverseproxy.Service{
|
||||
Services: []*rpservice.Service{
|
||||
{
|
||||
ID: "svc1",
|
||||
Enabled: true,
|
||||
Targets: []*reverseproxy.Target{
|
||||
Targets: []*rpservice.Target{
|
||||
{TargetType: "peer"},
|
||||
{TargetType: "host"},
|
||||
},
|
||||
Auth: reverseproxy.AuthConfig{
|
||||
PasswordAuth: &reverseproxy.PasswordAuthConfig{Enabled: true},
|
||||
Auth: rpservice.AuthConfig{
|
||||
PasswordAuth: &rpservice.PasswordAuthConfig{Enabled: true},
|
||||
},
|
||||
Meta: reverseproxy.ServiceMeta{Status: string(reverseproxy.StatusActive)},
|
||||
Meta: rpservice.Meta{Status: string(rpservice.StatusActive)},
|
||||
},
|
||||
{
|
||||
ID: "svc2",
|
||||
Enabled: false,
|
||||
Targets: []*reverseproxy.Target{
|
||||
Targets: []*rpservice.Target{
|
||||
{TargetType: "domain"},
|
||||
},
|
||||
Auth: reverseproxy.AuthConfig{
|
||||
BearerAuth: &reverseproxy.BearerAuthConfig{Enabled: true},
|
||||
Auth: rpservice.AuthConfig{
|
||||
BearerAuth: &rpservice.BearerAuthConfig{Enabled: true},
|
||||
},
|
||||
Meta: reverseproxy.ServiceMeta{Status: string(reverseproxy.StatusPending)},
|
||||
Meta: rpservice.Meta{Status: string(rpservice.StatusPending)},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -206,6 +207,13 @@ func (mockDatasource) GetAllAccounts(_ context.Context) []*types.Account {
|
||||
"1": {},
|
||||
},
|
||||
},
|
||||
oidcUserID: {
|
||||
Id: oidcUserID,
|
||||
IsServiceUser: false,
|
||||
PATs: map[string]*types.PersonalAccessToken{
|
||||
"1": {},
|
||||
},
|
||||
},
|
||||
},
|
||||
Networks: []*networkTypes.Network{
|
||||
{
|
||||
@@ -278,14 +286,14 @@ func TestGenerateProperties(t *testing.T) {
|
||||
if properties["rules"] != 4 {
|
||||
t.Errorf("expected 4 rules, got %d", properties["rules"])
|
||||
}
|
||||
if properties["users"] != 2 {
|
||||
t.Errorf("expected 1 users, got %d", properties["users"])
|
||||
if properties["users"] != 3 {
|
||||
t.Errorf("expected 3 users, got %d", properties["users"])
|
||||
}
|
||||
if properties["setup_keys_usage"] != 2 {
|
||||
t.Errorf("expected 1 setup_keys_usage, got %d", properties["setup_keys_usage"])
|
||||
}
|
||||
if properties["pats"] != 4 {
|
||||
t.Errorf("expected 4 personal_access_tokens, got %d", properties["pats"])
|
||||
if properties["pats"] != 5 {
|
||||
t.Errorf("expected 5 personal_access_tokens, got %d", properties["pats"])
|
||||
}
|
||||
if properties["peers_ssh_enabled"] != 2 {
|
||||
t.Errorf("expected 2 peers_ssh_enabled, got %d", properties["peers_ssh_enabled"])
|
||||
@@ -369,14 +377,20 @@ func TestGenerateProperties(t *testing.T) {
|
||||
if properties["local_users_count"] != 1 {
|
||||
t.Errorf("expected 1 local_users_count, got %d", properties["local_users_count"])
|
||||
}
|
||||
if properties["idp_users_count"] != 1 {
|
||||
t.Errorf("expected 1 idp_users_count, got %d", properties["idp_users_count"])
|
||||
if properties["idp_users_count"] != 2 {
|
||||
t.Errorf("expected 2 idp_users_count, got %d", properties["idp_users_count"])
|
||||
}
|
||||
if properties["embedded_idp_users_local"] != 1 {
|
||||
t.Errorf("expected 1 embedded_idp_users_local, got %v", properties["embedded_idp_users_local"])
|
||||
}
|
||||
if properties["embedded_idp_users_zitadel"] != 1 {
|
||||
t.Errorf("expected 1 embedded_idp_users_zitadel, got %v", properties["embedded_idp_users_zitadel"])
|
||||
}
|
||||
if properties["embedded_idp_count"] != 1 {
|
||||
t.Errorf("expected 1 embedded_idp_count, got %v", properties["embedded_idp_count"])
|
||||
if properties["embedded_idp_users_oidc"] != 1 {
|
||||
t.Errorf("expected 1 embedded_idp_users_oidc, got %v", properties["embedded_idp_users_oidc"])
|
||||
}
|
||||
if properties["embedded_idp_count"] != 3 {
|
||||
t.Errorf("expected 3 embedded_idp_count, got %v", properties["embedded_idp_count"])
|
||||
}
|
||||
|
||||
if properties["services"] != 2 {
|
||||
@@ -436,7 +450,8 @@ func TestExtractIdpType(t *testing.T) {
|
||||
{"microsoft-abc123", "microsoft"},
|
||||
{"authentik-abc123", "authentik"},
|
||||
{"keycloak-d5uv82dra0haedlf6kv0", "keycloak"},
|
||||
{"local", "oidc"},
|
||||
{"local", "local"},
|
||||
{"d6jvvp69kmnc73c9pl40", "oidc"},
|
||||
{"", "oidc"},
|
||||
}
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@ import (
|
||||
"google.golang.org/grpc/status"
|
||||
|
||||
nbdns "github.com/netbirdio/netbird/dns"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service"
|
||||
"github.com/netbirdio/netbird/management/server/account"
|
||||
"github.com/netbirdio/netbird/management/server/activity"
|
||||
"github.com/netbirdio/netbird/management/server/idp"
|
||||
@@ -148,7 +148,7 @@ type MockAccountManager struct {
|
||||
DeleteUserInviteFunc func(ctx context.Context, accountID, initiatorUserID, inviteID string) error
|
||||
}
|
||||
|
||||
func (am *MockAccountManager) SetServiceManager(serviceManager reverseproxy.Manager) {
|
||||
func (am *MockAccountManager) SetServiceManager(serviceManager service.Manager) {
|
||||
// Mock implementation - no-op
|
||||
}
|
||||
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service"
|
||||
"github.com/netbirdio/netbird/management/server/account"
|
||||
"github.com/netbirdio/netbird/management/server/activity"
|
||||
"github.com/netbirdio/netbird/management/server/groups"
|
||||
@@ -33,23 +33,23 @@ type Manager interface {
|
||||
}
|
||||
|
||||
type managerImpl struct {
|
||||
store store.Store
|
||||
permissionsManager permissions.Manager
|
||||
groupsManager groups.Manager
|
||||
accountManager account.Manager
|
||||
reverseProxyManager reverseproxy.Manager
|
||||
store store.Store
|
||||
permissionsManager permissions.Manager
|
||||
groupsManager groups.Manager
|
||||
accountManager account.Manager
|
||||
serviceManager service.Manager
|
||||
}
|
||||
|
||||
type mockManager struct {
|
||||
}
|
||||
|
||||
func NewManager(store store.Store, permissionsManager permissions.Manager, groupsManager groups.Manager, accountManager account.Manager, reverseproxyManager reverseproxy.Manager) Manager {
|
||||
func NewManager(store store.Store, permissionsManager permissions.Manager, groupsManager groups.Manager, accountManager account.Manager, reverseproxyManager service.Manager) Manager {
|
||||
return &managerImpl{
|
||||
store: store,
|
||||
permissionsManager: permissionsManager,
|
||||
groupsManager: groupsManager,
|
||||
accountManager: accountManager,
|
||||
reverseProxyManager: reverseproxyManager,
|
||||
store: store,
|
||||
permissionsManager: permissionsManager,
|
||||
groupsManager: groupsManager,
|
||||
accountManager: accountManager,
|
||||
serviceManager: reverseproxyManager,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -264,7 +264,7 @@ func (m *managerImpl) UpdateResource(ctx context.Context, userID string, resourc
|
||||
|
||||
// TODO: optimize to only reload reverse proxies that are affected by the resource update instead of all of them
|
||||
go func() {
|
||||
err := m.reverseProxyManager.ReloadAllServicesForAccount(ctx, resource.AccountID)
|
||||
err := m.serviceManager.ReloadAllServicesForAccount(ctx, resource.AccountID)
|
||||
if err != nil {
|
||||
log.WithContext(ctx).Warnf("failed to reload all proxies for account: %v", err)
|
||||
}
|
||||
@@ -322,7 +322,7 @@ func (m *managerImpl) DeleteResource(ctx context.Context, accountID, userID, net
|
||||
return status.NewPermissionDeniedError()
|
||||
}
|
||||
|
||||
serviceID, err := m.reverseProxyManager.GetServiceIDByTargetID(ctx, accountID, resourceID)
|
||||
serviceID, err := m.serviceManager.GetServiceIDByTargetID(ctx, accountID, resourceID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check if resource is used by service: %w", err)
|
||||
}
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
"github.com/golang/mock/gomock"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy"
|
||||
reverseproxy "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service"
|
||||
"github.com/netbirdio/netbird/management/server/groups"
|
||||
"github.com/netbirdio/netbird/management/server/mock_server"
|
||||
"github.com/netbirdio/netbird/management/server/networks/resources/types"
|
||||
@@ -31,8 +31,8 @@ func Test_GetAllResourcesInNetworkReturnsResources(t *testing.T) {
|
||||
am := mock_server.MockAccountManager{}
|
||||
groupsManager := groups.NewManagerMock()
|
||||
ctrl := gomock.NewController(t)
|
||||
reverseProxyManager := reverseproxy.NewMockManager(ctrl)
|
||||
manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager)
|
||||
serviceManager := reverseproxy.NewMockManager(ctrl)
|
||||
manager := NewManager(store, permissionsManager, groupsManager, &am, serviceManager)
|
||||
|
||||
resources, err := manager.GetAllResourcesInNetwork(ctx, accountID, userID, networkID)
|
||||
require.NoError(t, err)
|
||||
@@ -54,8 +54,8 @@ func Test_GetAllResourcesInNetworkReturnsPermissionDenied(t *testing.T) {
|
||||
am := mock_server.MockAccountManager{}
|
||||
groupsManager := groups.NewManagerMock()
|
||||
ctrl := gomock.NewController(t)
|
||||
reverseProxyManager := reverseproxy.NewMockManager(ctrl)
|
||||
manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager)
|
||||
serviceManager := reverseproxy.NewMockManager(ctrl)
|
||||
manager := NewManager(store, permissionsManager, groupsManager, &am, serviceManager)
|
||||
|
||||
resources, err := manager.GetAllResourcesInNetwork(ctx, accountID, userID, networkID)
|
||||
require.Error(t, err)
|
||||
@@ -76,8 +76,8 @@ func Test_GetAllResourcesInAccountReturnsResources(t *testing.T) {
|
||||
am := mock_server.MockAccountManager{}
|
||||
groupsManager := groups.NewManagerMock()
|
||||
ctrl := gomock.NewController(t)
|
||||
reverseProxyManager := reverseproxy.NewMockManager(ctrl)
|
||||
manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager)
|
||||
serviceManager := reverseproxy.NewMockManager(ctrl)
|
||||
manager := NewManager(store, permissionsManager, groupsManager, &am, serviceManager)
|
||||
|
||||
resources, err := manager.GetAllResourcesInAccount(ctx, accountID, userID)
|
||||
require.NoError(t, err)
|
||||
@@ -98,8 +98,8 @@ func Test_GetAllResourcesInAccountReturnsPermissionDenied(t *testing.T) {
|
||||
am := mock_server.MockAccountManager{}
|
||||
groupsManager := groups.NewManagerMock()
|
||||
ctrl := gomock.NewController(t)
|
||||
reverseProxyManager := reverseproxy.NewMockManager(ctrl)
|
||||
manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager)
|
||||
serviceManager := reverseproxy.NewMockManager(ctrl)
|
||||
manager := NewManager(store, permissionsManager, groupsManager, &am, serviceManager)
|
||||
|
||||
resources, err := manager.GetAllResourcesInAccount(ctx, accountID, userID)
|
||||
require.Error(t, err)
|
||||
@@ -123,8 +123,8 @@ func Test_GetResourceInNetworkReturnsResources(t *testing.T) {
|
||||
am := mock_server.MockAccountManager{}
|
||||
groupsManager := groups.NewManagerMock()
|
||||
ctrl := gomock.NewController(t)
|
||||
reverseProxyManager := reverseproxy.NewMockManager(ctrl)
|
||||
manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager)
|
||||
serviceManager := reverseproxy.NewMockManager(ctrl)
|
||||
manager := NewManager(store, permissionsManager, groupsManager, &am, serviceManager)
|
||||
|
||||
resource, err := manager.GetResource(ctx, accountID, userID, networkID, resourceID)
|
||||
require.NoError(t, err)
|
||||
@@ -147,8 +147,8 @@ func Test_GetResourceInNetworkReturnsPermissionDenied(t *testing.T) {
|
||||
am := mock_server.MockAccountManager{}
|
||||
groupsManager := groups.NewManagerMock()
|
||||
ctrl := gomock.NewController(t)
|
||||
reverseProxyManager := reverseproxy.NewMockManager(ctrl)
|
||||
manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager)
|
||||
serviceManager := reverseproxy.NewMockManager(ctrl)
|
||||
manager := NewManager(store, permissionsManager, groupsManager, &am, serviceManager)
|
||||
|
||||
resources, err := manager.GetResource(ctx, accountID, userID, networkID, resourceID)
|
||||
require.Error(t, err)
|
||||
@@ -176,9 +176,9 @@ func Test_CreateResourceSuccessfully(t *testing.T) {
|
||||
am := mock_server.MockAccountManager{}
|
||||
groupsManager := groups.NewManagerMock()
|
||||
ctrl := gomock.NewController(t)
|
||||
reverseProxyManager := reverseproxy.NewMockManager(ctrl)
|
||||
reverseProxyManager.EXPECT().ReloadAllServicesForAccount(gomock.Any(), resource.AccountID).Return(nil).AnyTimes()
|
||||
manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager)
|
||||
serviceManager := reverseproxy.NewMockManager(ctrl)
|
||||
serviceManager.EXPECT().ReloadAllServicesForAccount(gomock.Any(), resource.AccountID).Return(nil).AnyTimes()
|
||||
manager := NewManager(store, permissionsManager, groupsManager, &am, serviceManager)
|
||||
|
||||
createdResource, err := manager.CreateResource(ctx, userID, resource)
|
||||
require.NoError(t, err)
|
||||
@@ -205,8 +205,8 @@ func Test_CreateResourceFailsWithPermissionDenied(t *testing.T) {
|
||||
am := mock_server.MockAccountManager{}
|
||||
groupsManager := groups.NewManagerMock()
|
||||
ctrl := gomock.NewController(t)
|
||||
reverseProxyManager := reverseproxy.NewMockManager(ctrl)
|
||||
manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager)
|
||||
serviceManager := reverseproxy.NewMockManager(ctrl)
|
||||
manager := NewManager(store, permissionsManager, groupsManager, &am, serviceManager)
|
||||
|
||||
createdResource, err := manager.CreateResource(ctx, userID, resource)
|
||||
require.Error(t, err)
|
||||
@@ -234,8 +234,8 @@ func Test_CreateResourceFailsWithInvalidAddress(t *testing.T) {
|
||||
am := mock_server.MockAccountManager{}
|
||||
groupsManager := groups.NewManagerMock()
|
||||
ctrl := gomock.NewController(t)
|
||||
reverseProxyManager := reverseproxy.NewMockManager(ctrl)
|
||||
manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager)
|
||||
serviceManager := reverseproxy.NewMockManager(ctrl)
|
||||
manager := NewManager(store, permissionsManager, groupsManager, &am, serviceManager)
|
||||
|
||||
createdResource, err := manager.CreateResource(ctx, userID, resource)
|
||||
require.Error(t, err)
|
||||
@@ -262,8 +262,8 @@ func Test_CreateResourceFailsWithUsedName(t *testing.T) {
|
||||
am := mock_server.MockAccountManager{}
|
||||
groupsManager := groups.NewManagerMock()
|
||||
ctrl := gomock.NewController(t)
|
||||
reverseProxyManager := reverseproxy.NewMockManager(ctrl)
|
||||
manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager)
|
||||
serviceManager := reverseproxy.NewMockManager(ctrl)
|
||||
manager := NewManager(store, permissionsManager, groupsManager, &am, serviceManager)
|
||||
|
||||
createdResource, err := manager.CreateResource(ctx, userID, resource)
|
||||
require.Error(t, err)
|
||||
@@ -294,9 +294,9 @@ func Test_UpdateResourceSuccessfully(t *testing.T) {
|
||||
am := mock_server.MockAccountManager{}
|
||||
groupsManager := groups.NewManagerMock()
|
||||
ctrl := gomock.NewController(t)
|
||||
reverseProxyManager := reverseproxy.NewMockManager(ctrl)
|
||||
reverseProxyManager.EXPECT().ReloadAllServicesForAccount(gomock.Any(), accountID).Return(nil).AnyTimes()
|
||||
manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager)
|
||||
serviceManager := reverseproxy.NewMockManager(ctrl)
|
||||
serviceManager.EXPECT().ReloadAllServicesForAccount(gomock.Any(), accountID).Return(nil).AnyTimes()
|
||||
manager := NewManager(store, permissionsManager, groupsManager, &am, serviceManager)
|
||||
|
||||
updatedResource, err := manager.UpdateResource(ctx, userID, resource)
|
||||
require.NoError(t, err)
|
||||
@@ -329,8 +329,8 @@ func Test_UpdateResourceFailsWithResourceNotFound(t *testing.T) {
|
||||
am := mock_server.MockAccountManager{}
|
||||
groupsManager := groups.NewManagerMock()
|
||||
ctrl := gomock.NewController(t)
|
||||
reverseProxyManager := reverseproxy.NewMockManager(ctrl)
|
||||
manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager)
|
||||
serviceManager := reverseproxy.NewMockManager(ctrl)
|
||||
manager := NewManager(store, permissionsManager, groupsManager, &am, serviceManager)
|
||||
|
||||
updatedResource, err := manager.UpdateResource(ctx, userID, resource)
|
||||
require.Error(t, err)
|
||||
@@ -361,8 +361,8 @@ func Test_UpdateResourceFailsWithNameInUse(t *testing.T) {
|
||||
am := mock_server.MockAccountManager{}
|
||||
groupsManager := groups.NewManagerMock()
|
||||
ctrl := gomock.NewController(t)
|
||||
reverseProxyManager := reverseproxy.NewMockManager(ctrl)
|
||||
manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager)
|
||||
serviceManager := reverseproxy.NewMockManager(ctrl)
|
||||
manager := NewManager(store, permissionsManager, groupsManager, &am, serviceManager)
|
||||
|
||||
updatedResource, err := manager.UpdateResource(ctx, userID, resource)
|
||||
require.Error(t, err)
|
||||
@@ -392,8 +392,8 @@ func Test_UpdateResourceFailsWithPermissionDenied(t *testing.T) {
|
||||
am := mock_server.MockAccountManager{}
|
||||
groupsManager := groups.NewManagerMock()
|
||||
ctrl := gomock.NewController(t)
|
||||
reverseProxyManager := reverseproxy.NewMockManager(ctrl)
|
||||
manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager)
|
||||
serviceManager := reverseproxy.NewMockManager(ctrl)
|
||||
manager := NewManager(store, permissionsManager, groupsManager, &am, serviceManager)
|
||||
|
||||
updatedResource, err := manager.UpdateResource(ctx, userID, resource)
|
||||
require.Error(t, err)
|
||||
@@ -416,9 +416,9 @@ func Test_DeleteResourceSuccessfully(t *testing.T) {
|
||||
am := mock_server.MockAccountManager{}
|
||||
groupsManager := groups.NewManagerMock()
|
||||
ctrl := gomock.NewController(t)
|
||||
reverseProxyManager := reverseproxy.NewMockManager(ctrl)
|
||||
reverseProxyManager.EXPECT().GetServiceIDByTargetID(gomock.Any(), accountID, resourceID).Return("", nil).AnyTimes()
|
||||
manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager)
|
||||
serviceManager := reverseproxy.NewMockManager(ctrl)
|
||||
serviceManager.EXPECT().GetServiceIDByTargetID(gomock.Any(), accountID, resourceID).Return("", nil).AnyTimes()
|
||||
manager := NewManager(store, permissionsManager, groupsManager, &am, serviceManager)
|
||||
|
||||
err = manager.DeleteResource(ctx, accountID, userID, networkID, resourceID)
|
||||
require.NoError(t, err)
|
||||
@@ -440,8 +440,8 @@ func Test_DeleteResourceFailsWithPermissionDenied(t *testing.T) {
|
||||
am := mock_server.MockAccountManager{}
|
||||
groupsManager := groups.NewManagerMock()
|
||||
ctrl := gomock.NewController(t)
|
||||
reverseProxyManager := reverseproxy.NewMockManager(ctrl)
|
||||
manager := NewManager(store, permissionsManager, groupsManager, &am, reverseProxyManager)
|
||||
serviceManager := reverseproxy.NewMockManager(ctrl)
|
||||
manager := NewManager(store, permissionsManager, groupsManager, &am, serviceManager)
|
||||
|
||||
err = manager.DeleteResource(ctx, accountID, userID, networkID, resourceID)
|
||||
require.Error(t, err)
|
||||
|
||||
@@ -493,7 +493,7 @@ func (am *DefaultAccountManager) DeletePeer(ctx context.Context, accountID, peer
|
||||
var settings *types.Settings
|
||||
var eventsToStore []func()
|
||||
|
||||
serviceID, err := am.reverseProxyManager.GetServiceIDByTargetID(ctx, accountID, peerID)
|
||||
serviceID, err := am.serviceManager.GetServiceIDByTargetID(ctx, accountID, peerID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check if resource is used by service: %w", err)
|
||||
}
|
||||
|
||||
@@ -352,9 +352,10 @@ func (p *Peer) FromAPITemporaryAccessRequest(a *api.PeerTemporaryAccessRequest)
|
||||
p.Name = a.Name
|
||||
p.Key = a.WgPubKey
|
||||
p.Meta = PeerSystemMeta{
|
||||
Hostname: a.Name,
|
||||
GoOS: "js",
|
||||
OS: "js",
|
||||
Hostname: a.Name,
|
||||
GoOS: "js",
|
||||
OS: "js",
|
||||
KernelVersion: "wasm",
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -28,9 +28,10 @@ import (
|
||||
"gorm.io/gorm/logger"
|
||||
|
||||
nbdns "github.com/netbirdio/netbird/dns"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy"
|
||||
rpservice "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/zones"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/zones/records"
|
||||
resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types"
|
||||
@@ -131,8 +132,8 @@ func NewSqlStore(ctx context.Context, db *gorm.DB, storeEngine types.Engine, met
|
||||
&types.Account{}, &types.Policy{}, &types.PolicyRule{}, &route.Route{}, &nbdns.NameServerGroup{},
|
||||
&installation{}, &types.ExtraSettings{}, &posture.Checks{}, &nbpeer.NetworkAddress{},
|
||||
&networkTypes.Network{}, &routerTypes.NetworkRouter{}, &resourceTypes.NetworkResource{}, &types.AccountOnboarding{},
|
||||
&types.Job{}, &zones.Zone{}, &records.Record{}, &types.UserInviteRecord{}, &reverseproxy.Service{}, &reverseproxy.Target{}, &domain.Domain{},
|
||||
&accesslogs.AccessLogEntry{},
|
||||
&types.Job{}, &zones.Zone{}, &records.Record{}, &types.UserInviteRecord{}, &rpservice.Service{}, &rpservice.Target{}, &domain.Domain{},
|
||||
&accesslogs.AccessLogEntry{}, &proxy.Proxy{},
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("auto migratePreAuto: %w", err)
|
||||
@@ -2075,7 +2076,7 @@ func (s *SqlStore) getPostureChecks(ctx context.Context, accountID string) ([]*p
|
||||
return checks, nil
|
||||
}
|
||||
|
||||
func (s *SqlStore) getServices(ctx context.Context, accountID string) ([]*reverseproxy.Service, error) {
|
||||
func (s *SqlStore) getServices(ctx context.Context, accountID string) ([]*rpservice.Service, error) {
|
||||
const serviceQuery = `SELECT id, account_id, name, domain, enabled, auth,
|
||||
meta_created_at, meta_certificate_issued_at, meta_status, proxy_cluster,
|
||||
pass_host_header, rewrite_redirects, session_private_key, session_public_key
|
||||
@@ -2090,8 +2091,8 @@ func (s *SqlStore) getServices(ctx context.Context, accountID string) ([]*revers
|
||||
return nil, err
|
||||
}
|
||||
|
||||
services, err := pgx.CollectRows(serviceRows, func(row pgx.CollectableRow) (*reverseproxy.Service, error) {
|
||||
var s reverseproxy.Service
|
||||
services, err := pgx.CollectRows(serviceRows, func(row pgx.CollectableRow) (*rpservice.Service, error) {
|
||||
var s rpservice.Service
|
||||
var auth []byte
|
||||
var createdAt, certIssuedAt sql.NullTime
|
||||
var status, proxyCluster, sessionPrivateKey, sessionPublicKey sql.NullString
|
||||
@@ -2121,7 +2122,7 @@ func (s *SqlStore) getServices(ctx context.Context, accountID string) ([]*revers
|
||||
}
|
||||
}
|
||||
|
||||
s.Meta = reverseproxy.ServiceMeta{}
|
||||
s.Meta = rpservice.Meta{}
|
||||
if createdAt.Valid {
|
||||
s.Meta.CreatedAt = createdAt.Time
|
||||
}
|
||||
@@ -2142,7 +2143,7 @@ func (s *SqlStore) getServices(ctx context.Context, accountID string) ([]*revers
|
||||
s.SessionPublicKey = sessionPublicKey.String
|
||||
}
|
||||
|
||||
s.Targets = []*reverseproxy.Target{}
|
||||
s.Targets = []*rpservice.Target{}
|
||||
return &s, nil
|
||||
})
|
||||
if err != nil {
|
||||
@@ -2154,7 +2155,7 @@ func (s *SqlStore) getServices(ctx context.Context, accountID string) ([]*revers
|
||||
}
|
||||
|
||||
serviceIDs := make([]string, len(services))
|
||||
serviceMap := make(map[string]*reverseproxy.Service)
|
||||
serviceMap := make(map[string]*rpservice.Service)
|
||||
for i, s := range services {
|
||||
serviceIDs[i] = s.ID
|
||||
serviceMap[s.ID] = s
|
||||
@@ -2165,8 +2166,8 @@ func (s *SqlStore) getServices(ctx context.Context, accountID string) ([]*revers
|
||||
return nil, err
|
||||
}
|
||||
|
||||
targets, err := pgx.CollectRows(targetRows, func(row pgx.CollectableRow) (*reverseproxy.Target, error) {
|
||||
var t reverseproxy.Target
|
||||
targets, err := pgx.CollectRows(targetRows, func(row pgx.CollectableRow) (*rpservice.Target, error) {
|
||||
var t rpservice.Target
|
||||
var path sql.NullString
|
||||
err := row.Scan(
|
||||
&t.ID,
|
||||
@@ -2728,14 +2729,28 @@ func (s *SqlStore) GetStoreEngine() types.Engine {
|
||||
|
||||
// NewSqliteStore creates a new SQLite store.
|
||||
func NewSqliteStore(ctx context.Context, dataDir string, metrics telemetry.AppMetrics, skipMigration bool) (*SqlStore, error) {
|
||||
storeStr := fmt.Sprintf("%s?cache=shared", storeSqliteFileName)
|
||||
if runtime.GOOS == "windows" {
|
||||
// Vo avoid `The process cannot access the file because it is being used by another process` on Windows
|
||||
storeStr = storeSqliteFileName
|
||||
storeFile := storeSqliteFileName
|
||||
if envFile, ok := os.LookupEnv("NB_STORE_ENGINE_SQLITE_FILE"); ok && envFile != "" {
|
||||
storeFile = envFile
|
||||
}
|
||||
|
||||
file := filepath.Join(dataDir, storeStr)
|
||||
db, err := gorm.Open(sqlite.Open(file), getGormConfig())
|
||||
// Separate file path from any SQLite URI query parameters (e.g., "store.db?mode=rwc")
|
||||
filePath, query, hasQuery := strings.Cut(storeFile, "?")
|
||||
|
||||
connStr := filePath
|
||||
if !filepath.IsAbs(filePath) {
|
||||
connStr = filepath.Join(dataDir, filePath)
|
||||
}
|
||||
|
||||
// Append query parameters: user-provided take precedence, otherwise default to cache=shared on non-Windows
|
||||
if hasQuery {
|
||||
connStr += "?" + query
|
||||
} else if runtime.GOOS != "windows" {
|
||||
// To avoid `The process cannot access the file because it is being used by another process` on Windows
|
||||
connStr += "?cache=shared"
|
||||
}
|
||||
|
||||
db, err := gorm.Open(sqlite.Open(connStr), getGormConfig())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -4838,7 +4853,7 @@ func (s *SqlStore) GetPeerIDByKey(ctx context.Context, lockStrength LockingStren
|
||||
return peerID, nil
|
||||
}
|
||||
|
||||
func (s *SqlStore) CreateService(ctx context.Context, service *reverseproxy.Service) error {
|
||||
func (s *SqlStore) CreateService(ctx context.Context, service *rpservice.Service) error {
|
||||
serviceCopy := service.Copy()
|
||||
if err := serviceCopy.EncryptSensitiveData(s.fieldEncrypt); err != nil {
|
||||
return fmt.Errorf("encrypt service data: %w", err)
|
||||
@@ -4852,16 +4867,19 @@ func (s *SqlStore) CreateService(ctx context.Context, service *reverseproxy.Serv
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SqlStore) UpdateService(ctx context.Context, service *reverseproxy.Service) error {
|
||||
func (s *SqlStore) UpdateService(ctx context.Context, service *rpservice.Service) error {
|
||||
serviceCopy := service.Copy()
|
||||
if err := serviceCopy.EncryptSensitiveData(s.fieldEncrypt); err != nil {
|
||||
return fmt.Errorf("encrypt service data: %w", err)
|
||||
}
|
||||
|
||||
// Create target type instance outside transaction to avoid variable shadowing
|
||||
targetType := &rpservice.Target{}
|
||||
|
||||
// Use a transaction to ensure atomic updates of the service and its targets
|
||||
err := s.db.Transaction(func(tx *gorm.DB) error {
|
||||
// Delete existing targets
|
||||
if err := tx.Where("service_id = ?", serviceCopy.ID).Delete(&reverseproxy.Target{}).Error; err != nil {
|
||||
if err := tx.Where("service_id = ?", serviceCopy.ID).Delete(targetType).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -4882,7 +4900,7 @@ func (s *SqlStore) UpdateService(ctx context.Context, service *reverseproxy.Serv
|
||||
}
|
||||
|
||||
func (s *SqlStore) DeleteService(ctx context.Context, accountID, serviceID string) error {
|
||||
result := s.db.Delete(&reverseproxy.Service{}, accountAndIDQueryCondition, accountID, serviceID)
|
||||
result := s.db.Delete(&rpservice.Service{}, accountAndIDQueryCondition, accountID, serviceID)
|
||||
if result.Error != nil {
|
||||
log.WithContext(ctx).Errorf("failed to delete service from store: %v", result.Error)
|
||||
return status.Errorf(status.Internal, "failed to delete service from store")
|
||||
@@ -4895,13 +4913,53 @@ func (s *SqlStore) DeleteService(ctx context.Context, accountID, serviceID strin
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SqlStore) GetServiceByID(ctx context.Context, lockStrength LockingStrength, accountID, serviceID string) (*reverseproxy.Service, error) {
|
||||
func (s *SqlStore) DeleteTarget(ctx context.Context, accountID string, serviceID string, targetID uint) error {
|
||||
result := s.db.Delete(&rpservice.Target{}, "account_id = ? AND service_id = ? AND id = ?", accountID, serviceID, targetID)
|
||||
if result.Error != nil {
|
||||
log.WithContext(ctx).Errorf("failed to delete target from store: %v", result.Error)
|
||||
return status.Errorf(status.Internal, "failed to delete target from store")
|
||||
}
|
||||
|
||||
if result.RowsAffected == 0 {
|
||||
return status.Errorf(status.NotFound, "target not found for service %s", serviceID)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SqlStore) DeleteServiceTargets(ctx context.Context, accountID string, serviceID string) error {
|
||||
result := s.db.Delete(&rpservice.Target{}, "account_id = ? AND service_id = ?", accountID, serviceID)
|
||||
if result.Error != nil {
|
||||
log.WithContext(ctx).Errorf("failed to delete targets from store: %v", result.Error)
|
||||
return status.Errorf(status.Internal, "failed to delete targets from store")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetTargetsByServiceID retrieves all targets for a given service
|
||||
func (s *SqlStore) GetTargetsByServiceID(ctx context.Context, lockStrength LockingStrength, accountID string, serviceID string) ([]*rpservice.Target, error) {
|
||||
var targets []*rpservice.Target
|
||||
tx := s.db
|
||||
if lockStrength != LockingStrengthNone {
|
||||
tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)})
|
||||
}
|
||||
result := tx.Where("account_id = ? AND service_id = ?", accountID, serviceID).Find(&targets)
|
||||
if result.Error != nil {
|
||||
log.WithContext(ctx).Errorf("failed to get targets from store: %v", result.Error)
|
||||
return nil, status.Errorf(status.Internal, "failed to get targets from store")
|
||||
}
|
||||
|
||||
return targets, nil
|
||||
}
|
||||
|
||||
func (s *SqlStore) GetServiceByID(ctx context.Context, lockStrength LockingStrength, accountID, serviceID string) (*rpservice.Service, error) {
|
||||
tx := s.db.Preload("Targets")
|
||||
if lockStrength != LockingStrengthNone {
|
||||
tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)})
|
||||
}
|
||||
|
||||
var service *reverseproxy.Service
|
||||
var service *rpservice.Service
|
||||
result := tx.Take(&service, accountAndIDQueryCondition, accountID, serviceID)
|
||||
if result.Error != nil {
|
||||
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
|
||||
@@ -4919,31 +4977,9 @@ func (s *SqlStore) GetServiceByID(ctx context.Context, lockStrength LockingStren
|
||||
return service, nil
|
||||
}
|
||||
|
||||
func (s *SqlStore) GetServicesByAccountID(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*reverseproxy.Service, error) {
|
||||
tx := s.db.Preload("Targets")
|
||||
if lockStrength != LockingStrengthNone {
|
||||
tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)})
|
||||
}
|
||||
|
||||
var serviceList []*reverseproxy.Service
|
||||
result := tx.Find(&serviceList, accountIDCondition, accountID)
|
||||
if result.Error != nil {
|
||||
log.WithContext(ctx).Errorf("failed to get services from the store: %s", result.Error)
|
||||
return nil, status.Errorf(status.Internal, "failed to get services from store")
|
||||
}
|
||||
|
||||
for _, service := range serviceList {
|
||||
if err := service.DecryptSensitiveData(s.fieldEncrypt); err != nil {
|
||||
return nil, fmt.Errorf("decrypt service data: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return serviceList, nil
|
||||
}
|
||||
|
||||
func (s *SqlStore) GetServiceByDomain(ctx context.Context, accountID, domain string) (*reverseproxy.Service, error) {
|
||||
var service *reverseproxy.Service
|
||||
result := s.db.Preload("Targets").Where("account_id = ? AND domain = ?", accountID, domain).First(&service)
|
||||
func (s *SqlStore) GetServiceByDomain(ctx context.Context, domain string) (*rpservice.Service, error) {
|
||||
var service *rpservice.Service
|
||||
result := s.db.Preload("Targets").Where("domain = ?", domain).First(&service)
|
||||
if result.Error != nil {
|
||||
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
|
||||
return nil, status.Errorf(status.NotFound, "service with domain %s not found", domain)
|
||||
@@ -4960,13 +4996,13 @@ func (s *SqlStore) GetServiceByDomain(ctx context.Context, accountID, domain str
|
||||
return service, nil
|
||||
}
|
||||
|
||||
func (s *SqlStore) GetServices(ctx context.Context, lockStrength LockingStrength) ([]*reverseproxy.Service, error) {
|
||||
func (s *SqlStore) GetServices(ctx context.Context, lockStrength LockingStrength) ([]*rpservice.Service, error) {
|
||||
tx := s.db.Preload("Targets")
|
||||
if lockStrength != LockingStrengthNone {
|
||||
tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)})
|
||||
}
|
||||
|
||||
var serviceList []*reverseproxy.Service
|
||||
var serviceList []*rpservice.Service
|
||||
result := tx.Find(&serviceList)
|
||||
if result.Error != nil {
|
||||
log.WithContext(ctx).Errorf("failed to get services from the store: %s", result.Error)
|
||||
@@ -4982,13 +5018,13 @@ func (s *SqlStore) GetServices(ctx context.Context, lockStrength LockingStrength
|
||||
return serviceList, nil
|
||||
}
|
||||
|
||||
func (s *SqlStore) GetAccountServices(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*reverseproxy.Service, error) {
|
||||
func (s *SqlStore) GetAccountServices(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*rpservice.Service, error) {
|
||||
tx := s.db.Preload("Targets")
|
||||
if lockStrength != LockingStrengthNone {
|
||||
tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)})
|
||||
}
|
||||
|
||||
var serviceList []*reverseproxy.Service
|
||||
var serviceList []*rpservice.Service
|
||||
result := tx.Find(&serviceList, accountIDCondition, accountID)
|
||||
if result.Error != nil {
|
||||
log.WithContext(ctx).Errorf("failed to get services from the store: %s", result.Error)
|
||||
@@ -5004,6 +5040,99 @@ func (s *SqlStore) GetAccountServices(ctx context.Context, lockStrength LockingS
|
||||
return serviceList, nil
|
||||
}
|
||||
|
||||
// RenewEphemeralService updates the last_renewed_at timestamp for an ephemeral service.
|
||||
func (s *SqlStore) RenewEphemeralService(ctx context.Context, accountID, peerID, domain string) error {
|
||||
result := s.db.Model(&rpservice.Service{}).
|
||||
Where("account_id = ? AND source_peer = ? AND domain = ? AND source = ?", accountID, peerID, domain, rpservice.SourceEphemeral).
|
||||
Update("meta_last_renewed_at", time.Now())
|
||||
if result.Error != nil {
|
||||
log.WithContext(ctx).Errorf("failed to renew ephemeral service: %v", result.Error)
|
||||
return status.Errorf(status.Internal, "renew ephemeral service")
|
||||
}
|
||||
if result.RowsAffected == 0 {
|
||||
return status.Errorf(status.NotFound, "no active expose session for domain %s", domain)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetExpiredEphemeralServices returns ephemeral services whose last renewal exceeds the given TTL.
|
||||
// Only the fields needed for reaping are selected. The limit parameter caps the batch size to
|
||||
// avoid loading too many rows in a single tick. Rows with empty source_peer are excluded to
|
||||
// skip malformed legacy data.
|
||||
func (s *SqlStore) GetExpiredEphemeralServices(ctx context.Context, ttl time.Duration, limit int) ([]*rpservice.Service, error) {
|
||||
cutoff := time.Now().Add(-ttl)
|
||||
var services []*rpservice.Service
|
||||
result := s.db.
|
||||
Select("id", "account_id", "source_peer", "domain").
|
||||
Where("source = ? AND source_peer <> '' AND meta_last_renewed_at < ?", rpservice.SourceEphemeral, cutoff).
|
||||
Limit(limit).
|
||||
Find(&services)
|
||||
if result.Error != nil {
|
||||
log.WithContext(ctx).Errorf("failed to get expired ephemeral services: %v", result.Error)
|
||||
return nil, status.Errorf(status.Internal, "get expired ephemeral services")
|
||||
}
|
||||
return services, nil
|
||||
}
|
||||
|
||||
// CountEphemeralServicesByPeer returns the count of ephemeral services for a specific peer.
|
||||
// Use LockingStrengthUpdate inside a transaction to serialize concurrent create operations.
|
||||
// The locking is applied via a row-level SELECT ... FOR UPDATE (not on the aggregate) to
|
||||
// stay compatible with Postgres, which disallows FOR UPDATE on COUNT(*).
|
||||
func (s *SqlStore) CountEphemeralServicesByPeer(ctx context.Context, lockStrength LockingStrength, accountID, peerID string) (int64, error) {
|
||||
if lockStrength == LockingStrengthNone {
|
||||
var count int64
|
||||
result := s.db.Model(&rpservice.Service{}).
|
||||
Where("account_id = ? AND source_peer = ? AND source = ?", accountID, peerID, rpservice.SourceEphemeral).
|
||||
Count(&count)
|
||||
if result.Error != nil {
|
||||
log.WithContext(ctx).Errorf("failed to count ephemeral services: %v", result.Error)
|
||||
return 0, status.Errorf(status.Internal, "count ephemeral services")
|
||||
}
|
||||
return count, nil
|
||||
}
|
||||
|
||||
var ids []string
|
||||
result := s.db.Model(&rpservice.Service{}).
|
||||
Clauses(clause.Locking{Strength: string(lockStrength)}).
|
||||
Select("id").
|
||||
Where("account_id = ? AND source_peer = ? AND source = ?", accountID, peerID, rpservice.SourceEphemeral).
|
||||
Pluck("id", &ids)
|
||||
if result.Error != nil {
|
||||
log.WithContext(ctx).Errorf("failed to count ephemeral services: %v", result.Error)
|
||||
return 0, status.Errorf(status.Internal, "count ephemeral services")
|
||||
}
|
||||
return int64(len(ids)), nil
|
||||
}
|
||||
|
||||
// EphemeralServiceExists checks if an ephemeral service exists for the given peer and domain.
|
||||
// Use LockingStrengthUpdate inside a transaction to serialize concurrent create operations.
|
||||
func (s *SqlStore) EphemeralServiceExists(ctx context.Context, lockStrength LockingStrength, accountID, peerID, domain string) (bool, error) {
|
||||
if lockStrength == LockingStrengthNone {
|
||||
var count int64
|
||||
result := s.db.Model(&rpservice.Service{}).
|
||||
Where("account_id = ? AND source_peer = ? AND domain = ? AND source = ?", accountID, peerID, domain, rpservice.SourceEphemeral).
|
||||
Count(&count)
|
||||
if result.Error != nil {
|
||||
log.WithContext(ctx).Errorf("failed to check ephemeral service existence: %v", result.Error)
|
||||
return false, status.Errorf(status.Internal, "check ephemeral service existence")
|
||||
}
|
||||
return count > 0, nil
|
||||
}
|
||||
|
||||
var id string
|
||||
result := s.db.Model(&rpservice.Service{}).
|
||||
Clauses(clause.Locking{Strength: string(lockStrength)}).
|
||||
Select("id").
|
||||
Where("account_id = ? AND source_peer = ? AND domain = ? AND source = ?", accountID, peerID, domain, rpservice.SourceEphemeral).
|
||||
Limit(1).
|
||||
Pluck("id", &id)
|
||||
if result.Error != nil {
|
||||
log.WithContext(ctx).Errorf("failed to check ephemeral service existence: %v", result.Error)
|
||||
return false, status.Errorf(status.Internal, "check ephemeral service existence")
|
||||
}
|
||||
return id != "", nil
|
||||
}
|
||||
|
||||
func (s *SqlStore) GetCustomDomain(ctx context.Context, accountID string, domainID string) (*domain.Domain, error) {
|
||||
tx := s.db
|
||||
|
||||
@@ -5216,13 +5345,13 @@ func (s *SqlStore) applyAccessLogFilters(query *gorm.DB, filter accesslogs.Acces
|
||||
return query
|
||||
}
|
||||
|
||||
func (s *SqlStore) GetServiceTargetByTargetID(ctx context.Context, lockStrength LockingStrength, accountID string, targetID string) (*reverseproxy.Target, error) {
|
||||
func (s *SqlStore) GetServiceTargetByTargetID(ctx context.Context, lockStrength LockingStrength, accountID string, targetID string) (*rpservice.Target, error) {
|
||||
tx := s.db
|
||||
if lockStrength != LockingStrengthNone {
|
||||
tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)})
|
||||
}
|
||||
|
||||
var target *reverseproxy.Target
|
||||
var target *rpservice.Target
|
||||
result := tx.Take(&target, "account_id = ? AND target_id = ?", accountID, targetID)
|
||||
if result.Error != nil {
|
||||
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
|
||||
@@ -5235,3 +5364,65 @@ func (s *SqlStore) GetServiceTargetByTargetID(ctx context.Context, lockStrength
|
||||
|
||||
return target, nil
|
||||
}
|
||||
|
||||
// SaveProxy saves or updates a proxy in the database
|
||||
func (s *SqlStore) SaveProxy(ctx context.Context, p *proxy.Proxy) error {
|
||||
result := s.db.WithContext(ctx).Save(p)
|
||||
if result.Error != nil {
|
||||
log.WithContext(ctx).Errorf("failed to save proxy: %v", result.Error)
|
||||
return status.Errorf(status.Internal, "failed to save proxy")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateProxyHeartbeat updates the last_seen timestamp for a proxy
|
||||
func (s *SqlStore) UpdateProxyHeartbeat(ctx context.Context, proxyID string) error {
|
||||
result := s.db.WithContext(ctx).
|
||||
Model(&proxy.Proxy{}).
|
||||
Where("id = ? AND status = ?", proxyID, "connected").
|
||||
Update("last_seen", time.Now())
|
||||
|
||||
if result.Error != nil {
|
||||
log.WithContext(ctx).Errorf("failed to update proxy heartbeat: %v", result.Error)
|
||||
return status.Errorf(status.Internal, "failed to update proxy heartbeat")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetActiveProxyClusterAddresses returns all unique cluster addresses for active proxies
|
||||
func (s *SqlStore) GetActiveProxyClusterAddresses(ctx context.Context) ([]string, error) {
|
||||
var addresses []string
|
||||
|
||||
result := s.db.WithContext(ctx).
|
||||
Model(&proxy.Proxy{}).
|
||||
Where("status = ? AND last_seen > ?", "connected", time.Now().Add(-2*time.Minute)).
|
||||
Distinct("cluster_address").
|
||||
Pluck("cluster_address", &addresses)
|
||||
|
||||
if result.Error != nil {
|
||||
log.WithContext(ctx).Errorf("failed to get active proxy cluster addresses: %v", result.Error)
|
||||
return nil, status.Errorf(status.Internal, "failed to get active proxy cluster addresses")
|
||||
}
|
||||
|
||||
return addresses, nil
|
||||
}
|
||||
|
||||
// CleanupStaleProxies deletes proxies that haven't sent heartbeat in the specified duration
|
||||
func (s *SqlStore) CleanupStaleProxies(ctx context.Context, inactivityDuration time.Duration) error {
|
||||
cutoffTime := time.Now().Add(-inactivityDuration)
|
||||
|
||||
result := s.db.WithContext(ctx).
|
||||
Where("last_seen < ?", cutoffTime).
|
||||
Delete(&proxy.Proxy{})
|
||||
|
||||
if result.Error != nil {
|
||||
log.WithContext(ctx).Errorf("failed to cleanup stale proxies: %v", result.Error)
|
||||
return status.Errorf(status.Internal, "failed to cleanup stale proxies")
|
||||
}
|
||||
|
||||
if result.RowsAffected > 0 {
|
||||
log.WithContext(ctx).Infof("Cleaned up %d stale proxies", result.RowsAffected)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -20,7 +20,7 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
nbdns "github.com/netbirdio/netbird/dns"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service"
|
||||
resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types"
|
||||
routerTypes "github.com/netbirdio/netbird/management/server/networks/routers/types"
|
||||
networkTypes "github.com/netbirdio/netbird/management/server/networks/types"
|
||||
@@ -264,7 +264,7 @@ func setupBenchmarkDB(b testing.TB) (*SqlStore, func(), string) {
|
||||
&types.Policy{}, &types.PolicyRule{}, &route.Route{},
|
||||
&nbdns.NameServerGroup{}, &posture.Checks{}, &networkTypes.Network{},
|
||||
&routerTypes.NetworkRouter{}, &resourceTypes.NetworkResource{},
|
||||
&types.AccountOnboarding{}, &reverseproxy.Service{}, &reverseproxy.Target{},
|
||||
&types.AccountOnboarding{}, &service.Service{}, &service.Target{},
|
||||
}
|
||||
|
||||
for i := len(models) - 1; i >= 0; i-- {
|
||||
|
||||
@@ -25,9 +25,10 @@ import (
|
||||
"gorm.io/gorm"
|
||||
|
||||
"github.com/netbirdio/netbird/dns"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy"
|
||||
rpservice "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/zones"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/zones/records"
|
||||
"github.com/netbirdio/netbird/management/server/telemetry"
|
||||
@@ -252,14 +253,18 @@ type Store interface {
|
||||
MarkAllPendingJobsAsFailed(ctx context.Context, accountID, peerID, reason string) error
|
||||
GetPeerIDByKey(ctx context.Context, lockStrength LockingStrength, key string) (string, error)
|
||||
|
||||
CreateService(ctx context.Context, service *reverseproxy.Service) error
|
||||
UpdateService(ctx context.Context, service *reverseproxy.Service) error
|
||||
CreateService(ctx context.Context, service *rpservice.Service) error
|
||||
UpdateService(ctx context.Context, service *rpservice.Service) error
|
||||
DeleteService(ctx context.Context, accountID, serviceID string) error
|
||||
GetServiceByID(ctx context.Context, lockStrength LockingStrength, accountID, serviceID string) (*reverseproxy.Service, error)
|
||||
GetServicesByAccountID(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*reverseproxy.Service, error)
|
||||
GetServiceByDomain(ctx context.Context, accountID, domain string) (*reverseproxy.Service, error)
|
||||
GetServices(ctx context.Context, lockStrength LockingStrength) ([]*reverseproxy.Service, error)
|
||||
GetAccountServices(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*reverseproxy.Service, error)
|
||||
GetServiceByID(ctx context.Context, lockStrength LockingStrength, accountID, serviceID string) (*rpservice.Service, error)
|
||||
GetServiceByDomain(ctx context.Context, domain string) (*rpservice.Service, error)
|
||||
GetServices(ctx context.Context, lockStrength LockingStrength) ([]*rpservice.Service, error)
|
||||
GetAccountServices(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*rpservice.Service, error)
|
||||
|
||||
RenewEphemeralService(ctx context.Context, accountID, peerID, domain string) error
|
||||
GetExpiredEphemeralServices(ctx context.Context, ttl time.Duration, limit int) ([]*rpservice.Service, error)
|
||||
CountEphemeralServicesByPeer(ctx context.Context, lockStrength LockingStrength, accountID, peerID string) (int64, error)
|
||||
EphemeralServiceExists(ctx context.Context, lockStrength LockingStrength, accountID, peerID, domain string) (bool, error)
|
||||
|
||||
GetCustomDomain(ctx context.Context, accountID string, domainID string) (*domain.Domain, error)
|
||||
ListFreeDomains(ctx context.Context, accountID string) ([]string, error)
|
||||
@@ -271,9 +276,16 @@ type Store interface {
|
||||
CreateAccessLog(ctx context.Context, log *accesslogs.AccessLogEntry) error
|
||||
GetAccountAccessLogs(ctx context.Context, lockStrength LockingStrength, accountID string, filter accesslogs.AccessLogFilter) ([]*accesslogs.AccessLogEntry, int64, error)
|
||||
DeleteOldAccessLogs(ctx context.Context, olderThan time.Time) (int64, error)
|
||||
GetServiceTargetByTargetID(ctx context.Context, lockStrength LockingStrength, accountID string, targetID string) (*reverseproxy.Target, error)
|
||||
GetServiceTargetByTargetID(ctx context.Context, lockStrength LockingStrength, accountID string, targetID string) (*rpservice.Target, error)
|
||||
GetTargetsByServiceID(ctx context.Context, lockStrength LockingStrength, accountID string, serviceID string) ([]*rpservice.Target, error)
|
||||
DeleteTarget(ctx context.Context, accountID string, serviceID string, targetID uint) error
|
||||
DeleteServiceTargets(ctx context.Context, accountID string, serviceID string) error
|
||||
|
||||
SaveProxy(ctx context.Context, proxy *proxy.Proxy) error
|
||||
UpdateProxyHeartbeat(ctx context.Context, proxyID string) error
|
||||
GetActiveProxyClusterAddresses(ctx context.Context) ([]string, error)
|
||||
CleanupStaleProxies(ctx context.Context, inactivityDuration time.Duration) error
|
||||
|
||||
// GetCustomDomainsCounts returns the total and validated custom domain counts.
|
||||
GetCustomDomainsCounts(ctx context.Context) (total int64, validated int64, err error)
|
||||
}
|
||||
|
||||
|
||||
@@ -12,9 +12,10 @@ import (
|
||||
|
||||
gomock "github.com/golang/mock/gomock"
|
||||
dns "github.com/netbirdio/netbird/dns"
|
||||
reverseproxy "github.com/netbirdio/netbird/management/internals/modules/reverseproxy"
|
||||
accesslogs "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/accesslogs"
|
||||
domain "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain"
|
||||
proxy "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/proxy"
|
||||
service "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service"
|
||||
zones "github.com/netbirdio/netbird/management/internals/modules/zones"
|
||||
records "github.com/netbirdio/netbird/management/internals/modules/zones/records"
|
||||
types "github.com/netbirdio/netbird/management/server/networks/resources/types"
|
||||
@@ -150,6 +151,20 @@ func (mr *MockStoreMockRecorder) ApproveAccountPeers(ctx, accountID interface{})
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApproveAccountPeers", reflect.TypeOf((*MockStore)(nil).ApproveAccountPeers), ctx, accountID)
|
||||
}
|
||||
|
||||
// CleanupStaleProxies mocks base method.
|
||||
func (m *MockStore) CleanupStaleProxies(ctx context.Context, inactivityDuration time.Duration) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "CleanupStaleProxies", ctx, inactivityDuration)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// CleanupStaleProxies indicates an expected call of CleanupStaleProxies.
|
||||
func (mr *MockStoreMockRecorder) CleanupStaleProxies(ctx, inactivityDuration interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupStaleProxies", reflect.TypeOf((*MockStore)(nil).CleanupStaleProxies), ctx, inactivityDuration)
|
||||
}
|
||||
|
||||
// Close mocks base method.
|
||||
func (m *MockStore) Close(ctx context.Context) error {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -193,6 +208,21 @@ func (mr *MockStoreMockRecorder) CountAccountsByPrivateDomain(ctx, domain interf
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CountAccountsByPrivateDomain", reflect.TypeOf((*MockStore)(nil).CountAccountsByPrivateDomain), ctx, domain)
|
||||
}
|
||||
|
||||
// CountEphemeralServicesByPeer mocks base method.
|
||||
func (m *MockStore) CountEphemeralServicesByPeer(ctx context.Context, lockStrength LockingStrength, accountID, peerID string) (int64, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "CountEphemeralServicesByPeer", ctx, lockStrength, accountID, peerID)
|
||||
ret0, _ := ret[0].(int64)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// CountEphemeralServicesByPeer indicates an expected call of CountEphemeralServicesByPeer.
|
||||
func (mr *MockStoreMockRecorder) CountEphemeralServicesByPeer(ctx, lockStrength, accountID, peerID interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CountEphemeralServicesByPeer", reflect.TypeOf((*MockStore)(nil).CountEphemeralServicesByPeer), ctx, lockStrength, accountID, peerID)
|
||||
}
|
||||
|
||||
// CreateAccessLog mocks base method.
|
||||
func (m *MockStore) CreateAccessLog(ctx context.Context, log *accesslogs.AccessLogEntry) error {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -293,7 +323,7 @@ func (mr *MockStoreMockRecorder) CreatePolicy(ctx, policy interface{}) *gomock.C
|
||||
}
|
||||
|
||||
// CreateService mocks base method.
|
||||
func (m *MockStore) CreateService(ctx context.Context, service *reverseproxy.Service) error {
|
||||
func (m *MockStore) CreateService(ctx context.Context, service *service.Service) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "CreateService", ctx, service)
|
||||
ret0, _ := ret[0].(error)
|
||||
@@ -559,6 +589,20 @@ func (mr *MockStoreMockRecorder) DeleteService(ctx, accountID, serviceID interfa
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteService", reflect.TypeOf((*MockStore)(nil).DeleteService), ctx, accountID, serviceID)
|
||||
}
|
||||
|
||||
// DeleteServiceTargets mocks base method.
// gomock-generated stub: delegates to the controller and type-asserts the
// returned error; the asserted value is the zero value when the caller did
// not configure a return.
func (m *MockStore) DeleteServiceTargets(ctx context.Context, accountID, serviceID string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "DeleteServiceTargets", ctx, accountID, serviceID)
	ret0, _ := ret[0].(error)
	return ret0
}

// DeleteServiceTargets indicates an expected call of DeleteServiceTargets.
// Returns a *gomock.Call so tests can chain Return/Times/Do expectations.
func (mr *MockStoreMockRecorder) DeleteServiceTargets(ctx, accountID, serviceID interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteServiceTargets", reflect.TypeOf((*MockStore)(nil).DeleteServiceTargets), ctx, accountID, serviceID)
}
|
||||
|
||||
// DeleteSetupKey mocks base method.
|
||||
func (m *MockStore) DeleteSetupKey(ctx context.Context, accountID, keyID string) error {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -573,6 +617,20 @@ func (mr *MockStoreMockRecorder) DeleteSetupKey(ctx, accountID, keyID interface{
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSetupKey", reflect.TypeOf((*MockStore)(nil).DeleteSetupKey), ctx, accountID, keyID)
|
||||
}
|
||||
|
||||
// DeleteTarget mocks base method.
|
||||
func (m *MockStore) DeleteTarget(ctx context.Context, accountID, serviceID string, targetID uint) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "DeleteTarget", ctx, accountID, serviceID, targetID)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// DeleteTarget indicates an expected call of DeleteTarget.
|
||||
func (mr *MockStoreMockRecorder) DeleteTarget(ctx, accountID, serviceID, targetID interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteTarget", reflect.TypeOf((*MockStore)(nil).DeleteTarget), ctx, accountID, serviceID, targetID)
|
||||
}
|
||||
|
||||
// DeleteTokenID2UserIDIndex mocks base method.
|
||||
func (m *MockStore) DeleteTokenID2UserIDIndex(tokenID string) error {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -643,6 +701,21 @@ func (mr *MockStoreMockRecorder) DeleteZoneDNSRecords(ctx, accountID, zoneID int
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteZoneDNSRecords", reflect.TypeOf((*MockStore)(nil).DeleteZoneDNSRecords), ctx, accountID, zoneID)
|
||||
}
|
||||
|
||||
// EphemeralServiceExists mocks base method.
// gomock-generated stub: forwards to the controller and type-asserts the
// configured (bool, error) return values.
func (m *MockStore) EphemeralServiceExists(ctx context.Context, lockStrength LockingStrength, accountID, peerID, domain string) (bool, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "EphemeralServiceExists", ctx, lockStrength, accountID, peerID, domain)
	ret0, _ := ret[0].(bool)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// EphemeralServiceExists indicates an expected call of EphemeralServiceExists.
// Returns a *gomock.Call so tests can chain Return/Times/Do expectations.
func (mr *MockStoreMockRecorder) EphemeralServiceExists(ctx, lockStrength, accountID, peerID, domain interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EphemeralServiceExists", reflect.TypeOf((*MockStore)(nil).EphemeralServiceExists), ctx, lockStrength, accountID, peerID, domain)
}
|
||||
|
||||
// ExecuteInTransaction mocks base method.
|
||||
func (m *MockStore) ExecuteInTransaction(ctx context.Context, f func(Store) error) error {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -1095,10 +1168,10 @@ func (mr *MockStoreMockRecorder) GetAccountRoutes(ctx, lockStrength, accountID i
|
||||
}
|
||||
|
||||
// GetAccountServices mocks base method.
|
||||
func (m *MockStore) GetAccountServices(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*reverseproxy.Service, error) {
|
||||
func (m *MockStore) GetAccountServices(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*service.Service, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetAccountServices", ctx, lockStrength, accountID)
|
||||
ret0, _ := ret[0].([]*reverseproxy.Service)
|
||||
ret0, _ := ret[0].([]*service.Service)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
@@ -1109,21 +1182,6 @@ func (mr *MockStoreMockRecorder) GetAccountServices(ctx, lockStrength, accountID
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountServices", reflect.TypeOf((*MockStore)(nil).GetAccountServices), ctx, lockStrength, accountID)
|
||||
}
|
||||
|
||||
// GetServicesByAccountID mocks base method.
|
||||
func (m *MockStore) GetServicesByAccountID(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*reverseproxy.Service, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetServicesByAccountID", ctx, lockStrength, accountID)
|
||||
ret0, _ := ret[0].([]*reverseproxy.Service)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetServicesByAccountID indicates an expected call of GetServicesByAccountID.
|
||||
func (mr *MockStoreMockRecorder) GetServicesByAccountID(ctx, lockStrength, accountID interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServicesByAccountID", reflect.TypeOf((*MockStore)(nil).GetServicesByAccountID), ctx, lockStrength, accountID)
|
||||
}
|
||||
|
||||
// GetAccountSettings mocks base method.
|
||||
func (m *MockStore) GetAccountSettings(ctx context.Context, lockStrength LockingStrength, accountID string) (*types2.Settings, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -1214,6 +1272,21 @@ func (mr *MockStoreMockRecorder) GetAccountsCounter(ctx interface{}) *gomock.Cal
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountsCounter", reflect.TypeOf((*MockStore)(nil).GetAccountsCounter), ctx)
|
||||
}
|
||||
|
||||
// GetActiveProxyClusterAddresses mocks base method.
|
||||
func (m *MockStore) GetActiveProxyClusterAddresses(ctx context.Context) ([]string, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetActiveProxyClusterAddresses", ctx)
|
||||
ret0, _ := ret[0].([]string)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetActiveProxyClusterAddresses indicates an expected call of GetActiveProxyClusterAddresses.
|
||||
func (mr *MockStoreMockRecorder) GetActiveProxyClusterAddresses(ctx interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetActiveProxyClusterAddresses", reflect.TypeOf((*MockStore)(nil).GetActiveProxyClusterAddresses), ctx)
|
||||
}
|
||||
|
||||
// GetAllAccounts mocks base method.
|
||||
func (m *MockStore) GetAllAccounts(ctx context.Context) []*types2.Account {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -1288,6 +1361,22 @@ func (mr *MockStoreMockRecorder) GetCustomDomain(ctx, accountID, domainID interf
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCustomDomain", reflect.TypeOf((*MockStore)(nil).GetCustomDomain), ctx, accountID, domainID)
|
||||
}
|
||||
|
||||
// GetCustomDomainsCounts mocks base method.
|
||||
func (m *MockStore) GetCustomDomainsCounts(ctx context.Context) (int64, int64, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetCustomDomainsCounts", ctx)
|
||||
ret0, _ := ret[0].(int64)
|
||||
ret1, _ := ret[1].(int64)
|
||||
ret2, _ := ret[2].(error)
|
||||
return ret0, ret1, ret2
|
||||
}
|
||||
|
||||
// GetCustomDomainsCounts indicates an expected call of GetCustomDomainsCounts.
|
||||
func (mr *MockStoreMockRecorder) GetCustomDomainsCounts(ctx interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCustomDomainsCounts", reflect.TypeOf((*MockStore)(nil).GetCustomDomainsCounts), ctx)
|
||||
}
|
||||
|
||||
// GetDNSRecordByID mocks base method.
|
||||
func (m *MockStore) GetDNSRecordByID(ctx context.Context, lockStrength LockingStrength, accountID, zoneID, recordID string) (*records.Record, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -1303,6 +1392,21 @@ func (mr *MockStoreMockRecorder) GetDNSRecordByID(ctx, lockStrength, accountID,
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDNSRecordByID", reflect.TypeOf((*MockStore)(nil).GetDNSRecordByID), ctx, lockStrength, accountID, zoneID, recordID)
|
||||
}
|
||||
|
||||
// GetExpiredEphemeralServices mocks base method.
|
||||
func (m *MockStore) GetExpiredEphemeralServices(ctx context.Context, ttl time.Duration, limit int) ([]*service.Service, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetExpiredEphemeralServices", ctx, ttl, limit)
|
||||
ret0, _ := ret[0].([]*service.Service)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetExpiredEphemeralServices indicates an expected call of GetExpiredEphemeralServices.
|
||||
func (mr *MockStoreMockRecorder) GetExpiredEphemeralServices(ctx, ttl, limit interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetExpiredEphemeralServices", reflect.TypeOf((*MockStore)(nil).GetExpiredEphemeralServices), ctx, ttl, limit)
|
||||
}
|
||||
|
||||
// GetGroupByID mocks base method.
|
||||
func (m *MockStore) GetGroupByID(ctx context.Context, lockStrength LockingStrength, accountID, groupID string) (*types2.Group, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -1828,25 +1932,25 @@ func (mr *MockStoreMockRecorder) GetRouteByID(ctx, lockStrength, accountID, rout
|
||||
}
|
||||
|
||||
// GetServiceByDomain mocks base method.
|
||||
func (m *MockStore) GetServiceByDomain(ctx context.Context, accountID, domain string) (*reverseproxy.Service, error) {
|
||||
func (m *MockStore) GetServiceByDomain(ctx context.Context, domain string) (*service.Service, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetServiceByDomain", ctx, accountID, domain)
|
||||
ret0, _ := ret[0].(*reverseproxy.Service)
|
||||
ret := m.ctrl.Call(m, "GetServiceByDomain", ctx, domain)
|
||||
ret0, _ := ret[0].(*service.Service)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetServiceByDomain indicates an expected call of GetServiceByDomain.
|
||||
func (mr *MockStoreMockRecorder) GetServiceByDomain(ctx, accountID, domain interface{}) *gomock.Call {
|
||||
func (mr *MockStoreMockRecorder) GetServiceByDomain(ctx, domain interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServiceByDomain", reflect.TypeOf((*MockStore)(nil).GetServiceByDomain), ctx, accountID, domain)
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServiceByDomain", reflect.TypeOf((*MockStore)(nil).GetServiceByDomain), ctx, domain)
|
||||
}
|
||||
|
||||
// GetServiceByID mocks base method.
|
||||
func (m *MockStore) GetServiceByID(ctx context.Context, lockStrength LockingStrength, accountID, serviceID string) (*reverseproxy.Service, error) {
|
||||
func (m *MockStore) GetServiceByID(ctx context.Context, lockStrength LockingStrength, accountID, serviceID string) (*service.Service, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetServiceByID", ctx, lockStrength, accountID, serviceID)
|
||||
ret0, _ := ret[0].(*reverseproxy.Service)
|
||||
ret0, _ := ret[0].(*service.Service)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
@@ -1858,10 +1962,10 @@ func (mr *MockStoreMockRecorder) GetServiceByID(ctx, lockStrength, accountID, se
|
||||
}
|
||||
|
||||
// GetServiceTargetByTargetID mocks base method.
|
||||
func (m *MockStore) GetServiceTargetByTargetID(ctx context.Context, lockStrength LockingStrength, accountID, targetID string) (*reverseproxy.Target, error) {
|
||||
func (m *MockStore) GetServiceTargetByTargetID(ctx context.Context, lockStrength LockingStrength, accountID, targetID string) (*service.Target, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetServiceTargetByTargetID", ctx, lockStrength, accountID, targetID)
|
||||
ret0, _ := ret[0].(*reverseproxy.Target)
|
||||
ret0, _ := ret[0].(*service.Target)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
@@ -1872,27 +1976,11 @@ func (mr *MockStoreMockRecorder) GetServiceTargetByTargetID(ctx, lockStrength, a
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServiceTargetByTargetID", reflect.TypeOf((*MockStore)(nil).GetServiceTargetByTargetID), ctx, lockStrength, accountID, targetID)
|
||||
}
|
||||
|
||||
// GetCustomDomainsCounts mocks base method.
|
||||
func (m *MockStore) GetCustomDomainsCounts(ctx context.Context) (int64, int64, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetCustomDomainsCounts", ctx)
|
||||
ret0, _ := ret[0].(int64)
|
||||
ret1, _ := ret[1].(int64)
|
||||
ret2, _ := ret[2].(error)
|
||||
return ret0, ret1, ret2
|
||||
}
|
||||
|
||||
// GetCustomDomainsCounts indicates an expected call of GetCustomDomainsCounts.
|
||||
func (mr *MockStoreMockRecorder) GetCustomDomainsCounts(ctx interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCustomDomainsCounts", reflect.TypeOf((*MockStore)(nil).GetCustomDomainsCounts), ctx)
|
||||
}
|
||||
|
||||
// GetServices mocks base method.
|
||||
func (m *MockStore) GetServices(ctx context.Context, lockStrength LockingStrength) ([]*reverseproxy.Service, error) {
|
||||
func (m *MockStore) GetServices(ctx context.Context, lockStrength LockingStrength) ([]*service.Service, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetServices", ctx, lockStrength)
|
||||
ret0, _ := ret[0].([]*reverseproxy.Service)
|
||||
ret0, _ := ret[0].([]*service.Service)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
@@ -1962,6 +2050,21 @@ func (mr *MockStoreMockRecorder) GetTakenIPs(ctx, lockStrength, accountId interf
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTakenIPs", reflect.TypeOf((*MockStore)(nil).GetTakenIPs), ctx, lockStrength, accountId)
|
||||
}
|
||||
|
||||
// GetTargetsByServiceID mocks base method.
|
||||
func (m *MockStore) GetTargetsByServiceID(ctx context.Context, lockStrength LockingStrength, accountID, serviceID string) ([]*service.Target, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetTargetsByServiceID", ctx, lockStrength, accountID, serviceID)
|
||||
ret0, _ := ret[0].([]*service.Target)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetTargetsByServiceID indicates an expected call of GetTargetsByServiceID.
|
||||
func (mr *MockStoreMockRecorder) GetTargetsByServiceID(ctx, lockStrength, accountID, serviceID interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTargetsByServiceID", reflect.TypeOf((*MockStore)(nil).GetTargetsByServiceID), ctx, lockStrength, accountID, serviceID)
|
||||
}
|
||||
|
||||
// GetTokenIDByHashedToken mocks base method.
|
||||
func (m *MockStore) GetTokenIDByHashedToken(ctx context.Context, secret string) (string, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -2343,6 +2446,20 @@ func (mr *MockStoreMockRecorder) RemoveResourceFromGroup(ctx, accountId, groupID
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveResourceFromGroup", reflect.TypeOf((*MockStore)(nil).RemoveResourceFromGroup), ctx, accountId, groupID, resourceID)
|
||||
}
|
||||
|
||||
// RenewEphemeralService mocks base method.
// gomock-generated stub: forwards to the controller and type-asserts the
// configured error return.
func (m *MockStore) RenewEphemeralService(ctx context.Context, accountID, peerID, domain string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "RenewEphemeralService", ctx, accountID, peerID, domain)
	ret0, _ := ret[0].(error)
	return ret0
}

// RenewEphemeralService indicates an expected call of RenewEphemeralService.
// Returns a *gomock.Call so tests can chain Return/Times/Do expectations.
func (mr *MockStoreMockRecorder) RenewEphemeralService(ctx, accountID, peerID, domain interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RenewEphemeralService", reflect.TypeOf((*MockStore)(nil).RenewEphemeralService), ctx, accountID, peerID, domain)
}
|
||||
|
||||
// RevokeProxyAccessToken mocks base method.
|
||||
func (m *MockStore) RevokeProxyAccessToken(ctx context.Context, tokenID string) error {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -2567,6 +2684,20 @@ func (mr *MockStoreMockRecorder) SavePostureChecks(ctx, postureCheck interface{}
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SavePostureChecks", reflect.TypeOf((*MockStore)(nil).SavePostureChecks), ctx, postureCheck)
|
||||
}
|
||||
|
||||
// SaveProxy mocks base method.
|
||||
func (m *MockStore) SaveProxy(ctx context.Context, proxy *proxy.Proxy) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "SaveProxy", ctx, proxy)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// SaveProxy indicates an expected call of SaveProxy.
|
||||
func (mr *MockStoreMockRecorder) SaveProxy(ctx, proxy interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveProxy", reflect.TypeOf((*MockStore)(nil).SaveProxy), ctx, proxy)
|
||||
}
|
||||
|
||||
// SaveProxyAccessToken mocks base method.
|
||||
func (m *MockStore) SaveProxyAccessToken(ctx context.Context, token *types2.ProxyAccessToken) error {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -2762,8 +2893,22 @@ func (mr *MockStoreMockRecorder) UpdateGroups(ctx, accountID, groups interface{}
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateGroups", reflect.TypeOf((*MockStore)(nil).UpdateGroups), ctx, accountID, groups)
|
||||
}
|
||||
|
||||
// UpdateProxyHeartbeat mocks base method.
|
||||
func (m *MockStore) UpdateProxyHeartbeat(ctx context.Context, proxyID string) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "UpdateProxyHeartbeat", ctx, proxyID)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// UpdateProxyHeartbeat indicates an expected call of UpdateProxyHeartbeat.
|
||||
func (mr *MockStoreMockRecorder) UpdateProxyHeartbeat(ctx, proxyID interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateProxyHeartbeat", reflect.TypeOf((*MockStore)(nil).UpdateProxyHeartbeat), ctx, proxyID)
|
||||
}
|
||||
|
||||
// UpdateService mocks base method.
|
||||
func (m *MockStore) UpdateService(ctx context.Context, service *reverseproxy.Service) error {
|
||||
func (m *MockStore) UpdateService(ctx context.Context, service *service.Service) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "UpdateService", ctx, service)
|
||||
ret0, _ := ret[0].(error)
|
||||
|
||||
185
management/server/telemetry/account_aggregator.go
Normal file
185
management/server/telemetry/account_aggregator.go
Normal file
@@ -0,0 +1,185 @@
|
||||
package telemetry
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
sdkmetric "go.opentelemetry.io/otel/sdk/metric"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
)
|
||||
|
||||
// AccountDurationAggregator uses OpenTelemetry histograms per account to calculate P95
// without publishing individual account labels
type AccountDurationAggregator struct {
	mu            sync.RWMutex                 // guards accounts
	accounts      map[string]*accountHistogram // per-account histogram state, keyed by account ID
	meterProvider *sdkmetric.MeterProvider     // isolated provider: these metrics are read only via manualReader
	manualReader  *sdkmetric.ManualReader      // pulled explicitly on each FlushAndGetP95s call

	FlushInterval time.Duration // presumably the caller's flush cadence — not read inside this type in this view; TODO confirm
	MaxAge        time.Duration // accounts idle longer than this are evicted during flush
	ctx           context.Context // NOTE(review): storing ctx in a struct is unconventional; used by Record/Collect/Shutdown
}

type accountHistogram struct {
	histogram  metric.Int64Histogram // records durations in milliseconds
	lastUpdate time.Time             // last Record time; drives staleness eviction
}
|
||||
|
||||
// NewAccountDurationAggregator creates aggregator using OTel histograms
|
||||
func NewAccountDurationAggregator(ctx context.Context, flushInterval, maxAge time.Duration) *AccountDurationAggregator {
|
||||
manualReader := sdkmetric.NewManualReader(
|
||||
sdkmetric.WithTemporalitySelector(func(kind sdkmetric.InstrumentKind) metricdata.Temporality {
|
||||
return metricdata.DeltaTemporality
|
||||
}),
|
||||
)
|
||||
|
||||
meterProvider := sdkmetric.NewMeterProvider(
|
||||
sdkmetric.WithReader(manualReader),
|
||||
)
|
||||
|
||||
return &AccountDurationAggregator{
|
||||
accounts: make(map[string]*accountHistogram),
|
||||
meterProvider: meterProvider,
|
||||
manualReader: manualReader,
|
||||
FlushInterval: flushInterval,
|
||||
MaxAge: maxAge,
|
||||
ctx: ctx,
|
||||
}
|
||||
}
|
||||
|
||||
// Record adds a duration for an account using OTel histogram
|
||||
func (a *AccountDurationAggregator) Record(accountID string, duration time.Duration) {
|
||||
a.mu.Lock()
|
||||
defer a.mu.Unlock()
|
||||
|
||||
accHist, exists := a.accounts[accountID]
|
||||
if !exists {
|
||||
meter := a.meterProvider.Meter("account-aggregator")
|
||||
histogram, err := meter.Int64Histogram(
|
||||
"sync_duration_per_account",
|
||||
metric.WithUnit("milliseconds"),
|
||||
)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
accHist = &accountHistogram{
|
||||
histogram: histogram,
|
||||
}
|
||||
a.accounts[accountID] = accHist
|
||||
}
|
||||
|
||||
accHist.histogram.Record(a.ctx, duration.Milliseconds(),
|
||||
metric.WithAttributes(attribute.String("account_id", accountID)))
|
||||
accHist.lastUpdate = time.Now()
|
||||
}
|
||||
|
||||
// FlushAndGetP95s extracts P95 from each account's histogram
|
||||
func (a *AccountDurationAggregator) FlushAndGetP95s() []int64 {
|
||||
a.mu.Lock()
|
||||
defer a.mu.Unlock()
|
||||
|
||||
var rm metricdata.ResourceMetrics
|
||||
err := a.manualReader.Collect(a.ctx, &rm)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
p95s := make([]int64, 0, len(a.accounts))
|
||||
|
||||
for _, scopeMetrics := range rm.ScopeMetrics {
|
||||
for _, metric := range scopeMetrics.Metrics {
|
||||
histogramData, ok := metric.Data.(metricdata.Histogram[int64])
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, dataPoint := range histogramData.DataPoints {
|
||||
a.processDataPoint(dataPoint, now, &p95s)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
a.cleanupStaleAccounts(now)
|
||||
|
||||
return p95s
|
||||
}
|
||||
|
||||
// processDataPoint extracts P95 from a single histogram data point
|
||||
func (a *AccountDurationAggregator) processDataPoint(dataPoint metricdata.HistogramDataPoint[int64], now time.Time, p95s *[]int64) {
|
||||
accountID := extractAccountID(dataPoint)
|
||||
if accountID == "" {
|
||||
return
|
||||
}
|
||||
|
||||
if p95 := calculateP95FromHistogram(dataPoint); p95 > 0 {
|
||||
*p95s = append(*p95s, p95)
|
||||
}
|
||||
}
|
||||
|
||||
// cleanupStaleAccounts removes accounts that haven't been updated recently
|
||||
func (a *AccountDurationAggregator) cleanupStaleAccounts(now time.Time) {
|
||||
for accountID := range a.accounts {
|
||||
if a.isStaleAccount(accountID, now) {
|
||||
delete(a.accounts, accountID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// extractAccountID retrieves the account_id from histogram data point attributes
|
||||
func extractAccountID(dp metricdata.HistogramDataPoint[int64]) string {
|
||||
for _, attr := range dp.Attributes.ToSlice() {
|
||||
if attr.Key == "account_id" {
|
||||
return attr.Value.AsString()
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// isStaleAccount checks if an account hasn't been updated recently
|
||||
func (a *AccountDurationAggregator) isStaleAccount(accountID string, now time.Time) bool {
|
||||
accHist, exists := a.accounts[accountID]
|
||||
if !exists {
|
||||
return false
|
||||
}
|
||||
return now.Sub(accHist.lastUpdate) > a.MaxAge
|
||||
}
|
||||
|
||||
// calculateP95FromHistogram computes P95 from OTel histogram data
|
||||
func calculateP95FromHistogram(dp metricdata.HistogramDataPoint[int64]) int64 {
|
||||
if dp.Count == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
targetCount := uint64(math.Ceil(float64(dp.Count) * 0.95))
|
||||
if targetCount == 0 {
|
||||
targetCount = 1
|
||||
}
|
||||
var cumulativeCount uint64
|
||||
|
||||
for i, bucketCount := range dp.BucketCounts {
|
||||
cumulativeCount += bucketCount
|
||||
if cumulativeCount >= targetCount {
|
||||
if i < len(dp.Bounds) {
|
||||
return int64(dp.Bounds[i])
|
||||
}
|
||||
if maxVal, defined := dp.Max.Value(); defined {
|
||||
return maxVal
|
||||
}
|
||||
return dp.Sum / int64(dp.Count)
|
||||
}
|
||||
}
|
||||
|
||||
return dp.Sum / int64(dp.Count)
|
||||
}
|
||||
|
||||
// Shutdown cleans up resources by shutting down the internal MeterProvider.
// The aggregator must not be used after Shutdown returns.
func (a *AccountDurationAggregator) Shutdown() error {
	return a.meterProvider.Shutdown(a.ctx)
}
|
||||
219
management/server/telemetry/account_aggregator_test.go
Normal file
219
management/server/telemetry/account_aggregator_test.go
Normal file
@@ -0,0 +1,219 @@
|
||||
package telemetry
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestDeltaTemporality_P95ReflectsCurrentWindow checks flush-window isolation:
// the order of Record/Flush calls is significant here, so the body is kept as-is.
func TestDeltaTemporality_P95ReflectsCurrentWindow(t *testing.T) {
	// Verify that with delta temporality, each flush window only reflects
	// recordings since the last flush — not all-time data.
	ctx := context.Background()
	agg := NewAccountDurationAggregator(ctx, time.Minute, 5*time.Minute)
	defer func(agg *AccountDurationAggregator) {
		err := agg.Shutdown()
		if err != nil {
			t.Errorf("failed to shutdown aggregator: %v", err)
		}
	}(agg)

	// Window 1: Record 100 slow requests (500ms each)
	for range 100 {
		agg.Record("account-A", 500*time.Millisecond)
	}

	p95sWindow1 := agg.FlushAndGetP95s()
	require.Len(t, p95sWindow1, 1, "should have P95 for one account")
	firstP95 := p95sWindow1[0]
	// Loose lower bound (200, not 500): the P95 is a bucket upper bound, and
	// the exact value depends on SDK default bucket boundaries — TODO confirm.
	assert.GreaterOrEqual(t, firstP95, int64(200),
		"first window P95 should reflect the 500ms recordings")

	// Window 2: Record 100 FAST requests (10ms each)
	for range 100 {
		agg.Record("account-A", 10*time.Millisecond)
	}

	p95sWindow2 := agg.FlushAndGetP95s()
	require.Len(t, p95sWindow2, 1, "should have P95 for one account")
	secondP95 := p95sWindow2[0]

	// With delta temporality the P95 should drop significantly because
	// the first window's slow recordings are no longer included.
	assert.Less(t, secondP95, firstP95,
		"second window P95 should be lower than first — delta temporality "+
			"ensures each window only reflects recent recordings")
}
|
||||
|
||||
func TestEqualWeightPerAccount(t *testing.T) {
|
||||
// Verify that each account contributes exactly one P95 value,
|
||||
// regardless of how many requests it made.
|
||||
ctx := context.Background()
|
||||
agg := NewAccountDurationAggregator(ctx, time.Minute, 5*time.Minute)
|
||||
defer func(agg *AccountDurationAggregator) {
|
||||
err := agg.Shutdown()
|
||||
if err != nil {
|
||||
t.Errorf("failed to shutdown aggregator: %v", err)
|
||||
}
|
||||
}(agg)
|
||||
|
||||
// Account A: 10,000 requests at 500ms (noisy customer)
|
||||
for range 10000 {
|
||||
agg.Record("account-A", 500*time.Millisecond)
|
||||
}
|
||||
|
||||
// Accounts B, C, D: 10 requests each at 50ms (normal customers)
|
||||
for _, id := range []string{"account-B", "account-C", "account-D"} {
|
||||
for range 10 {
|
||||
agg.Record(id, 50*time.Millisecond)
|
||||
}
|
||||
}
|
||||
|
||||
p95s := agg.FlushAndGetP95s()
|
||||
|
||||
// Should get exactly 4 P95 values — one per account
|
||||
assert.Len(t, p95s, 4, "each account should contribute exactly one P95")
|
||||
}
|
||||
|
||||
func TestStaleAccountEviction(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
// Use a very short MaxAge so we can test staleness
|
||||
agg := NewAccountDurationAggregator(ctx, time.Minute, 50*time.Millisecond)
|
||||
defer func(agg *AccountDurationAggregator) {
|
||||
err := agg.Shutdown()
|
||||
if err != nil {
|
||||
t.Errorf("failed to shutdown aggregator: %v", err)
|
||||
}
|
||||
}(agg)
|
||||
|
||||
agg.Record("account-A", 100*time.Millisecond)
|
||||
agg.Record("account-B", 200*time.Millisecond)
|
||||
|
||||
// Both accounts should appear
|
||||
p95s := agg.FlushAndGetP95s()
|
||||
assert.Len(t, p95s, 2, "both accounts should have P95 values")
|
||||
|
||||
// Wait for account-A to become stale, then only update account-B
|
||||
time.Sleep(60 * time.Millisecond)
|
||||
agg.Record("account-B", 200*time.Millisecond)
|
||||
|
||||
p95s = agg.FlushAndGetP95s()
|
||||
assert.Len(t, p95s, 1, "both accounts should have P95 values")
|
||||
|
||||
// account-A should have been evicted from the accounts map
|
||||
agg.mu.RLock()
|
||||
_, accountAExists := agg.accounts["account-A"]
|
||||
_, accountBExists := agg.accounts["account-B"]
|
||||
agg.mu.RUnlock()
|
||||
|
||||
assert.False(t, accountAExists, "stale account-A should be evicted from map")
|
||||
assert.True(t, accountBExists, "active account-B should remain in map")
|
||||
}
|
||||
|
||||
// TestStaleAccountEviction_DoesNotReappear is timing-sensitive (relies on the
// 50ms MaxAge elapsing during the sleep); the body is kept as-is.
func TestStaleAccountEviction_DoesNotReappear(t *testing.T) {
	// Verify that with delta temporality, an evicted stale account does not
	// reappear in subsequent flushes.
	ctx := context.Background()
	agg := NewAccountDurationAggregator(ctx, time.Minute, 50*time.Millisecond)
	defer func(agg *AccountDurationAggregator) {
		err := agg.Shutdown()
		if err != nil {
			t.Errorf("failed to shutdown aggregator: %v", err)
		}
	}(agg)

	agg.Record("account-stale", 100*time.Millisecond)

	// Wait for it to become stale
	time.Sleep(60 * time.Millisecond)

	// First flush: should detect staleness and evict
	_ = agg.FlushAndGetP95s()

	agg.mu.RLock()
	_, exists := agg.accounts["account-stale"]
	agg.mu.RUnlock()
	assert.False(t, exists, "account should be evicted after first flush")

	// Second flush: with delta temporality, the stale account should NOT reappear
	p95sSecond := agg.FlushAndGetP95s()
	assert.Empty(t, p95sSecond,
		"evicted account should not reappear in subsequent flushes with delta temporality")
}
|
||||
|
||||
func TestP95Calculation_SingleSample(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
agg := NewAccountDurationAggregator(ctx, time.Minute, 5*time.Minute)
|
||||
defer func(agg *AccountDurationAggregator) {
|
||||
err := agg.Shutdown()
|
||||
if err != nil {
|
||||
t.Errorf("failed to shutdown aggregator: %v", err)
|
||||
}
|
||||
}(agg)
|
||||
|
||||
agg.Record("account-A", 150*time.Millisecond)
|
||||
|
||||
p95s := agg.FlushAndGetP95s()
|
||||
require.Len(t, p95s, 1)
|
||||
// With a single sample, P95 should be the bucket bound containing 150ms
|
||||
assert.Greater(t, p95s[0], int64(0), "P95 of a single sample should be positive")
|
||||
}
|
||||
|
||||
func TestP95Calculation_AllSameValue(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
agg := NewAccountDurationAggregator(ctx, time.Minute, 5*time.Minute)
|
||||
defer func(agg *AccountDurationAggregator) {
|
||||
err := agg.Shutdown()
|
||||
if err != nil {
|
||||
t.Errorf("failed to shutdown aggregator: %v", err)
|
||||
}
|
||||
}(agg)
|
||||
|
||||
// All samples are 100ms — P95 should be the bucket bound containing 100ms
|
||||
for range 100 {
|
||||
agg.Record("account-A", 100*time.Millisecond)
|
||||
}
|
||||
|
||||
p95s := agg.FlushAndGetP95s()
|
||||
require.Len(t, p95s, 1)
|
||||
assert.Greater(t, p95s[0], int64(0))
|
||||
}
|
||||
|
||||
func TestMultipleAccounts_IndependentP95s(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
agg := NewAccountDurationAggregator(ctx, time.Minute, 5*time.Minute)
|
||||
defer func(agg *AccountDurationAggregator) {
|
||||
err := agg.Shutdown()
|
||||
if err != nil {
|
||||
t.Errorf("failed to shutdown aggregator: %v", err)
|
||||
}
|
||||
}(agg)
|
||||
|
||||
// Account A: all fast (10ms)
|
||||
for range 100 {
|
||||
agg.Record("account-fast", 10*time.Millisecond)
|
||||
}
|
||||
|
||||
// Account B: all slow (5000ms)
|
||||
for range 100 {
|
||||
agg.Record("account-slow", 5000*time.Millisecond)
|
||||
}
|
||||
|
||||
p95s := agg.FlushAndGetP95s()
|
||||
require.Len(t, p95s, 2, "should have two P95 values")
|
||||
|
||||
// Find min and max — they should differ significantly
|
||||
minP95 := p95s[0]
|
||||
maxP95 := p95s[1]
|
||||
if minP95 > maxP95 {
|
||||
minP95, maxP95 = maxP95, minP95
|
||||
}
|
||||
|
||||
assert.Less(t, minP95, int64(1000),
|
||||
"fast account P95 should be well under 1000ms")
|
||||
assert.Greater(t, maxP95, int64(1000),
|
||||
"slow account P95 should be well over 1000ms")
|
||||
}
|
||||
@@ -13,18 +13,24 @@ const HighLatencyThreshold = time.Second * 7
|
||||
|
||||
// GRPCMetrics are gRPC server metrics
|
||||
type GRPCMetrics struct {
|
||||
meter metric.Meter
|
||||
syncRequestsCounter metric.Int64Counter
|
||||
syncRequestsBlockedCounter metric.Int64Counter
|
||||
loginRequestsCounter metric.Int64Counter
|
||||
loginRequestsBlockedCounter metric.Int64Counter
|
||||
loginRequestHighLatencyCounter metric.Int64Counter
|
||||
getKeyRequestsCounter metric.Int64Counter
|
||||
activeStreamsGauge metric.Int64ObservableGauge
|
||||
syncRequestDuration metric.Int64Histogram
|
||||
loginRequestDuration metric.Int64Histogram
|
||||
channelQueueLength metric.Int64Histogram
|
||||
ctx context.Context
|
||||
meter metric.Meter
|
||||
syncRequestsCounter metric.Int64Counter
|
||||
syncRequestsBlockedCounter metric.Int64Counter
|
||||
loginRequestsCounter metric.Int64Counter
|
||||
loginRequestsBlockedCounter metric.Int64Counter
|
||||
loginRequestHighLatencyCounter metric.Int64Counter
|
||||
getKeyRequestsCounter metric.Int64Counter
|
||||
activeStreamsGauge metric.Int64ObservableGauge
|
||||
syncRequestDuration metric.Int64Histogram
|
||||
syncRequestDurationP95ByAccount metric.Int64Histogram
|
||||
loginRequestDuration metric.Int64Histogram
|
||||
loginRequestDurationP95ByAccount metric.Int64Histogram
|
||||
channelQueueLength metric.Int64Histogram
|
||||
ctx context.Context
|
||||
|
||||
// Per-account aggregation
|
||||
syncDurationAggregator *AccountDurationAggregator
|
||||
loginDurationAggregator *AccountDurationAggregator
|
||||
}
|
||||
|
||||
// NewGRPCMetrics creates new GRPCMetrics struct and registers common metrics of the gRPC server
|
||||
@@ -93,6 +99,14 @@ func NewGRPCMetrics(ctx context.Context, meter metric.Meter) (*GRPCMetrics, erro
|
||||
return nil, err
|
||||
}
|
||||
|
||||
syncRequestDurationP95ByAccount, err := meter.Int64Histogram("management.grpc.sync.request.duration.p95.by.account.ms",
|
||||
metric.WithUnit("milliseconds"),
|
||||
metric.WithDescription("P95 duration of sync requests aggregated per account - each data point represents one account's P95"),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
loginRequestDuration, err := meter.Int64Histogram("management.grpc.login.request.duration.ms",
|
||||
metric.WithUnit("milliseconds"),
|
||||
metric.WithDescription("Duration of the login gRPC requests from the peers to authenticate and receive initial configuration and relay credentials"),
|
||||
@@ -101,6 +115,14 @@ func NewGRPCMetrics(ctx context.Context, meter metric.Meter) (*GRPCMetrics, erro
|
||||
return nil, err
|
||||
}
|
||||
|
||||
loginRequestDurationP95ByAccount, err := meter.Int64Histogram("management.grpc.login.request.duration.p95.by.account.ms",
|
||||
metric.WithUnit("milliseconds"),
|
||||
metric.WithDescription("P95 duration of login requests aggregated per account - each data point represents one account's P95"),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// We use histogram here as we have multiple channel at the same time and we want to see a slice at any given time
|
||||
// Then we should be able to extract min, manx, mean and the percentiles.
|
||||
// TODO(yury): This needs custom bucketing as we are interested in the values from 0 to server.channelBufferSize (100)
|
||||
@@ -113,20 +135,32 @@ func NewGRPCMetrics(ctx context.Context, meter metric.Meter) (*GRPCMetrics, erro
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &GRPCMetrics{
|
||||
meter: meter,
|
||||
syncRequestsCounter: syncRequestsCounter,
|
||||
syncRequestsBlockedCounter: syncRequestsBlockedCounter,
|
||||
loginRequestsCounter: loginRequestsCounter,
|
||||
loginRequestsBlockedCounter: loginRequestsBlockedCounter,
|
||||
loginRequestHighLatencyCounter: loginRequestHighLatencyCounter,
|
||||
getKeyRequestsCounter: getKeyRequestsCounter,
|
||||
activeStreamsGauge: activeStreamsGauge,
|
||||
syncRequestDuration: syncRequestDuration,
|
||||
loginRequestDuration: loginRequestDuration,
|
||||
channelQueueLength: channelQueue,
|
||||
ctx: ctx,
|
||||
}, err
|
||||
syncDurationAggregator := NewAccountDurationAggregator(ctx, 60*time.Second, 5*time.Minute)
|
||||
loginDurationAggregator := NewAccountDurationAggregator(ctx, 60*time.Second, 5*time.Minute)
|
||||
|
||||
grpcMetrics := &GRPCMetrics{
|
||||
meter: meter,
|
||||
syncRequestsCounter: syncRequestsCounter,
|
||||
syncRequestsBlockedCounter: syncRequestsBlockedCounter,
|
||||
loginRequestsCounter: loginRequestsCounter,
|
||||
loginRequestsBlockedCounter: loginRequestsBlockedCounter,
|
||||
loginRequestHighLatencyCounter: loginRequestHighLatencyCounter,
|
||||
getKeyRequestsCounter: getKeyRequestsCounter,
|
||||
activeStreamsGauge: activeStreamsGauge,
|
||||
syncRequestDuration: syncRequestDuration,
|
||||
syncRequestDurationP95ByAccount: syncRequestDurationP95ByAccount,
|
||||
loginRequestDuration: loginRequestDuration,
|
||||
loginRequestDurationP95ByAccount: loginRequestDurationP95ByAccount,
|
||||
channelQueueLength: channelQueue,
|
||||
ctx: ctx,
|
||||
syncDurationAggregator: syncDurationAggregator,
|
||||
loginDurationAggregator: loginDurationAggregator,
|
||||
}
|
||||
|
||||
go grpcMetrics.startSyncP95Flusher()
|
||||
go grpcMetrics.startLoginP95Flusher()
|
||||
|
||||
return grpcMetrics, err
|
||||
}
|
||||
|
||||
// CountSyncRequest counts the number of gRPC sync requests coming to the gRPC API
|
||||
@@ -157,6 +191,9 @@ func (grpcMetrics *GRPCMetrics) CountLoginRequestBlocked() {
|
||||
// CountLoginRequestDuration counts the duration of the login gRPC requests
|
||||
func (grpcMetrics *GRPCMetrics) CountLoginRequestDuration(duration time.Duration, accountID string) {
|
||||
grpcMetrics.loginRequestDuration.Record(grpcMetrics.ctx, duration.Milliseconds())
|
||||
|
||||
grpcMetrics.loginDurationAggregator.Record(accountID, duration)
|
||||
|
||||
if duration > HighLatencyThreshold {
|
||||
grpcMetrics.loginRequestHighLatencyCounter.Add(grpcMetrics.ctx, 1, metric.WithAttributes(attribute.String(AccountIDLabel, accountID)))
|
||||
}
|
||||
@@ -165,6 +202,44 @@ func (grpcMetrics *GRPCMetrics) CountLoginRequestDuration(duration time.Duration
|
||||
// CountSyncRequestDuration counts the duration of the sync gRPC requests
|
||||
func (grpcMetrics *GRPCMetrics) CountSyncRequestDuration(duration time.Duration, accountID string) {
|
||||
grpcMetrics.syncRequestDuration.Record(grpcMetrics.ctx, duration.Milliseconds())
|
||||
|
||||
grpcMetrics.syncDurationAggregator.Record(accountID, duration)
|
||||
}
|
||||
|
||||
// startSyncP95Flusher periodically flushes per-account sync P95 values to the histogram
|
||||
func (grpcMetrics *GRPCMetrics) startSyncP95Flusher() {
|
||||
ticker := time.NewTicker(grpcMetrics.syncDurationAggregator.FlushInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-grpcMetrics.ctx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
p95s := grpcMetrics.syncDurationAggregator.FlushAndGetP95s()
|
||||
for _, p95 := range p95s {
|
||||
grpcMetrics.syncRequestDurationP95ByAccount.Record(grpcMetrics.ctx, p95)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// startLoginP95Flusher periodically flushes per-account login P95 values to the histogram
|
||||
func (grpcMetrics *GRPCMetrics) startLoginP95Flusher() {
|
||||
ticker := time.NewTicker(grpcMetrics.loginDurationAggregator.FlushInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-grpcMetrics.ctx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
p95s := grpcMetrics.loginDurationAggregator.FlushAndGetP95s()
|
||||
for _, p95 := range p95s {
|
||||
grpcMetrics.loginRequestDurationP95ByAccount.Record(grpcMetrics.ctx, p95)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// RegisterConnectedStreams registers a function that collects number of active streams and feeds it to the metrics gauge.
|
||||
|
||||
@@ -18,7 +18,7 @@ import (
|
||||
|
||||
"github.com/netbirdio/netbird/client/ssh/auth"
|
||||
nbdns "github.com/netbirdio/netbird/dns"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/zones"
|
||||
"github.com/netbirdio/netbird/management/internals/modules/zones/records"
|
||||
resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types"
|
||||
@@ -100,7 +100,7 @@ type Account struct {
|
||||
NameServerGroupsG []nbdns.NameServerGroup `json:"-" gorm:"foreignKey:AccountID;references:id"`
|
||||
DNSSettings DNSSettings `gorm:"embedded;embeddedPrefix:dns_settings_"`
|
||||
PostureChecks []*posture.Checks `gorm:"foreignKey:AccountID;references:id"`
|
||||
Services []*reverseproxy.Service `gorm:"foreignKey:AccountID;references:id"`
|
||||
Services []*service.Service `gorm:"foreignKey:AccountID;references:id"`
|
||||
// Settings is a dictionary of Account settings
|
||||
Settings *Settings `gorm:"embedded;embeddedPrefix:settings_"`
|
||||
Networks []*networkTypes.Network `gorm:"foreignKey:AccountID;references:id"`
|
||||
@@ -906,7 +906,7 @@ func (a *Account) Copy() *Account {
|
||||
networkResources = append(networkResources, resource.Copy())
|
||||
}
|
||||
|
||||
services := []*reverseproxy.Service{}
|
||||
services := []*service.Service{}
|
||||
for _, service := range a.Services {
|
||||
services = append(services, service.Copy())
|
||||
}
|
||||
@@ -1814,7 +1814,7 @@ func (a *Account) InjectProxyPolicies(ctx context.Context) {
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Account) injectServiceProxyPolicies(ctx context.Context, service *reverseproxy.Service, proxyPeersByCluster map[string][]*nbpeer.Peer) {
|
||||
func (a *Account) injectServiceProxyPolicies(ctx context.Context, service *service.Service, proxyPeersByCluster map[string][]*nbpeer.Peer) {
|
||||
for _, target := range service.Targets {
|
||||
if !target.Enabled {
|
||||
continue
|
||||
@@ -1823,7 +1823,7 @@ func (a *Account) injectServiceProxyPolicies(ctx context.Context, service *rever
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Account) injectTargetProxyPolicies(ctx context.Context, service *reverseproxy.Service, target *reverseproxy.Target, proxyPeers []*nbpeer.Peer) {
|
||||
func (a *Account) injectTargetProxyPolicies(ctx context.Context, service *service.Service, target *service.Target, proxyPeers []*nbpeer.Peer) {
|
||||
port, ok := a.resolveTargetPort(ctx, target)
|
||||
if !ok {
|
||||
return
|
||||
@@ -1840,7 +1840,7 @@ func (a *Account) injectTargetProxyPolicies(ctx context.Context, service *revers
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Account) resolveTargetPort(ctx context.Context, target *reverseproxy.Target) (int, bool) {
|
||||
func (a *Account) resolveTargetPort(ctx context.Context, target *service.Target) (int, bool) {
|
||||
if target.Port != 0 {
|
||||
return target.Port, true
|
||||
}
|
||||
@@ -1856,7 +1856,7 @@ func (a *Account) resolveTargetPort(ctx context.Context, target *reverseproxy.Ta
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Account) createProxyPolicy(service *reverseproxy.Service, target *reverseproxy.Target, proxyPeer *nbpeer.Peer, port int, path string) *Policy {
|
||||
func (a *Account) createProxyPolicy(service *service.Service, target *service.Target, proxyPeer *nbpeer.Peer, port int, path string) *Policy {
|
||||
policyID := fmt.Sprintf("proxy-access-%s-%s-%s", service.ID, proxyPeer.ID, path)
|
||||
return &Policy{
|
||||
ID: policyID,
|
||||
|
||||
@@ -742,6 +742,11 @@ func (am *DefaultAccountManager) processUserUpdate(ctx context.Context, transact
|
||||
if err != nil {
|
||||
return false, nil, nil, nil, fmt.Errorf("failed to re-read initiator user in transaction: %w", err)
|
||||
}
|
||||
|
||||
// Ensure the initiator still has admin privileges
|
||||
if initiatorUser.HasAdminPower() && !freshInitiator.HasAdminPower() {
|
||||
return false, nil, nil, nil, status.Errorf(status.PermissionDenied, "initiator role was changed during request processing")
|
||||
}
|
||||
initiatorUser = freshInitiator
|
||||
}
|
||||
|
||||
@@ -872,10 +877,6 @@ func validateUserUpdate(groupsMap map[string]*types.Group, initiatorUser, oldUse
|
||||
return nil
|
||||
}
|
||||
|
||||
if !initiatorUser.HasAdminPower() {
|
||||
return status.Errorf(status.PermissionDenied, "only admins and owners can update users")
|
||||
}
|
||||
|
||||
if initiatorUser.HasAdminPower() && initiatorUser.Id == update.Id && oldUser.Blocked != update.Blocked {
|
||||
return status.Errorf(status.PermissionDenied, "admins can't block or unblock themselves")
|
||||
}
|
||||
|
||||
@@ -2032,27 +2032,6 @@ func TestUser_Operations_WithEmbeddedIDP(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestValidateUserUpdate_RejectsNonAdminInitiator(t *testing.T) {
|
||||
groupsMap := map[string]*types.Group{}
|
||||
|
||||
initiator := &types.User{
|
||||
Id: "initiator",
|
||||
Role: types.UserRoleUser,
|
||||
}
|
||||
oldUser := &types.User{
|
||||
Id: "target",
|
||||
Role: types.UserRoleUser,
|
||||
}
|
||||
update := &types.User{
|
||||
Id: "target",
|
||||
Role: types.UserRoleOwner,
|
||||
}
|
||||
|
||||
err := validateUserUpdate(groupsMap, initiator, oldUser, update)
|
||||
require.Error(t, err, "regular user should not be able to promote to owner")
|
||||
assert.Contains(t, err.Error(), "only admins and owners can update users")
|
||||
}
|
||||
|
||||
func TestProcessUserUpdate_RejectsStaleInitiatorRole(t *testing.T) {
|
||||
s, cleanup, err := store.NewTestStoreFromSQL(context.Background(), "", t.TempDir())
|
||||
require.NoError(t, err)
|
||||
@@ -2109,7 +2088,7 @@ func TestProcessUserUpdate_RejectsStaleInitiatorRole(t *testing.T) {
|
||||
})
|
||||
|
||||
require.Error(t, err, "processUserUpdate should reject stale initiator whose role was demoted")
|
||||
assert.Contains(t, err.Error(), "only admins and owners can update users")
|
||||
assert.Contains(t, err.Error(), "initiator role was changed during request processing")
|
||||
|
||||
targetUser, err := s.GetUserByUserID(context.Background(), store.LockingStrengthNone, targetID)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -42,6 +42,8 @@ var (
|
||||
acmeCerts bool
|
||||
acmeAddr string
|
||||
acmeDir string
|
||||
acmeEABKID string
|
||||
acmeEABHMACKey string
|
||||
acmeChallengeType string
|
||||
debugEndpoint bool
|
||||
debugEndpointAddr string
|
||||
@@ -74,6 +76,8 @@ func init() {
|
||||
rootCmd.Flags().BoolVar(&acmeCerts, "acme-certs", envBoolOrDefault("NB_PROXY_ACME_CERTIFICATES", false), "Generate ACME certificates automatically")
|
||||
rootCmd.Flags().StringVar(&acmeAddr, "acme-addr", envStringOrDefault("NB_PROXY_ACME_ADDRESS", ":80"), "HTTP address for ACME HTTP-01 challenges (only used when acme-challenge-type is http-01)")
|
||||
rootCmd.Flags().StringVar(&acmeDir, "acme-dir", envStringOrDefault("NB_PROXY_ACME_DIRECTORY", acme.LetsEncryptURL), "URL of ACME challenge directory")
|
||||
rootCmd.Flags().StringVar(&acmeEABKID, "acme-eab-kid", envStringOrDefault("NB_PROXY_ACME_EAB_KID", ""), "ACME EAB KID for account registration")
|
||||
rootCmd.Flags().StringVar(&acmeEABHMACKey, "acme-eab-hmac-key", envStringOrDefault("NB_PROXY_ACME_EAB_HMAC_KEY", ""), "ACME EAB HMAC key for account registration")
|
||||
rootCmd.Flags().StringVar(&acmeChallengeType, "acme-challenge-type", envStringOrDefault("NB_PROXY_ACME_CHALLENGE_TYPE", "tls-alpn-01"), "ACME challenge type: tls-alpn-01 (default, port 443 only) or http-01 (requires port 80)")
|
||||
rootCmd.Flags().BoolVar(&debugEndpoint, "debug-endpoint", envBoolOrDefault("NB_PROXY_DEBUG_ENDPOINT", false), "Enable debug HTTP endpoint")
|
||||
rootCmd.Flags().StringVar(&debugEndpointAddr, "debug-endpoint-addr", envStringOrDefault("NB_PROXY_DEBUG_ENDPOINT_ADDRESS", "localhost:8444"), "Address for the debug HTTP endpoint")
|
||||
@@ -149,6 +153,8 @@ func runServer(cmd *cobra.Command, args []string) error {
|
||||
GenerateACMECertificates: acmeCerts,
|
||||
ACMEChallengeAddress: acmeAddr,
|
||||
ACMEDirectory: acmeDir,
|
||||
ACMEEABKID: acmeEABKID,
|
||||
ACMEEABHMACKey: acmeEABHMACKey,
|
||||
ACMEChallengeType: acmeChallengeType,
|
||||
DebugEndpointEnabled: debugEndpoint,
|
||||
DebugEndpointAddress: debugEndpointAddr,
|
||||
|
||||
@@ -3,6 +3,7 @@ package accesslog
|
||||
import (
|
||||
"context"
|
||||
"net/netip"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
@@ -13,6 +14,23 @@ import (
|
||||
"github.com/netbirdio/netbird/shared/management/proto"
|
||||
)
|
||||
|
||||
const (
|
||||
requestThreshold = 10000 // Log every 10k requests
|
||||
bytesThreshold = 1024 * 1024 * 1024 // Log every 1GB
|
||||
usageCleanupPeriod = 1 * time.Hour // Clean up stale counters every hour
|
||||
usageInactiveWindow = 24 * time.Hour // Consider domain inactive if no traffic for 24 hours
|
||||
)
|
||||
|
||||
type domainUsage struct {
|
||||
requestCount int64
|
||||
requestStartTime time.Time
|
||||
|
||||
bytesTransferred int64
|
||||
bytesStartTime time.Time
|
||||
|
||||
lastActivity time.Time // Track last activity for cleanup
|
||||
}
|
||||
|
||||
type gRPCClient interface {
|
||||
SendAccessLog(ctx context.Context, in *proto.SendAccessLogRequest, opts ...grpc.CallOption) (*proto.SendAccessLogResponse, error)
|
||||
}
|
||||
@@ -22,6 +40,11 @@ type Logger struct {
|
||||
client gRPCClient
|
||||
logger *log.Logger
|
||||
trustedProxies []netip.Prefix
|
||||
|
||||
usageMux sync.Mutex
|
||||
domainUsage map[string]*domainUsage
|
||||
|
||||
cleanupCancel context.CancelFunc
|
||||
}
|
||||
|
||||
// NewLogger creates a new access log Logger. The trustedProxies parameter
|
||||
@@ -31,10 +54,26 @@ func NewLogger(client gRPCClient, logger *log.Logger, trustedProxies []netip.Pre
|
||||
if logger == nil {
|
||||
logger = log.StandardLogger()
|
||||
}
|
||||
return &Logger{
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
l := &Logger{
|
||||
client: client,
|
||||
logger: logger,
|
||||
trustedProxies: trustedProxies,
|
||||
domainUsage: make(map[string]*domainUsage),
|
||||
cleanupCancel: cancel,
|
||||
}
|
||||
|
||||
// Start background cleanup routine
|
||||
go l.cleanupStaleUsage(ctx)
|
||||
|
||||
return l
|
||||
}
|
||||
|
||||
// Close stops the cleanup routine. Should be called during graceful shutdown.
|
||||
func (l *Logger) Close() {
|
||||
if l.cleanupCancel != nil {
|
||||
l.cleanupCancel()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -51,6 +90,8 @@ type logEntry struct {
|
||||
AuthMechanism string
|
||||
UserId string
|
||||
AuthSuccess bool
|
||||
BytesUpload int64
|
||||
BytesDownload int64
|
||||
}
|
||||
|
||||
func (l *Logger) log(ctx context.Context, entry logEntry) {
|
||||
@@ -84,6 +125,8 @@ func (l *Logger) log(ctx context.Context, entry logEntry) {
|
||||
AuthMechanism: entry.AuthMechanism,
|
||||
UserId: entry.UserId,
|
||||
AuthSuccess: entry.AuthSuccess,
|
||||
BytesUpload: entry.BytesUpload,
|
||||
BytesDownload: entry.BytesDownload,
|
||||
},
|
||||
}); err != nil {
|
||||
// If it fails to send on the gRPC connection, then at least log it to the error log.
|
||||
@@ -103,3 +146,82 @@ func (l *Logger) log(ctx context.Context, entry logEntry) {
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// trackUsage records request and byte counts per domain, logging when thresholds are hit.
|
||||
func (l *Logger) trackUsage(domain string, bytesTransferred int64) {
|
||||
if domain == "" {
|
||||
return
|
||||
}
|
||||
|
||||
l.usageMux.Lock()
|
||||
defer l.usageMux.Unlock()
|
||||
|
||||
now := time.Now()
|
||||
usage, exists := l.domainUsage[domain]
|
||||
if !exists {
|
||||
usage = &domainUsage{
|
||||
requestStartTime: now,
|
||||
bytesStartTime: now,
|
||||
lastActivity: now,
|
||||
}
|
||||
l.domainUsage[domain] = usage
|
||||
}
|
||||
|
||||
usage.lastActivity = now
|
||||
|
||||
usage.requestCount++
|
||||
if usage.requestCount >= requestThreshold {
|
||||
elapsed := time.Since(usage.requestStartTime)
|
||||
l.logger.WithFields(log.Fields{
|
||||
"domain": domain,
|
||||
"requests": usage.requestCount,
|
||||
"duration": elapsed.String(),
|
||||
}).Infof("domain %s had %d requests over %s", domain, usage.requestCount, elapsed)
|
||||
|
||||
usage.requestCount = 0
|
||||
usage.requestStartTime = now
|
||||
}
|
||||
|
||||
usage.bytesTransferred += bytesTransferred
|
||||
if usage.bytesTransferred >= bytesThreshold {
|
||||
elapsed := time.Since(usage.bytesStartTime)
|
||||
bytesInGB := float64(usage.bytesTransferred) / (1024 * 1024 * 1024)
|
||||
l.logger.WithFields(log.Fields{
|
||||
"domain": domain,
|
||||
"bytes": usage.bytesTransferred,
|
||||
"bytes_gb": bytesInGB,
|
||||
"duration": elapsed.String(),
|
||||
}).Infof("domain %s transferred %.2f GB over %s", domain, bytesInGB, elapsed)
|
||||
|
||||
usage.bytesTransferred = 0
|
||||
usage.bytesStartTime = now
|
||||
}
|
||||
}
|
||||
|
||||
// cleanupStaleUsage removes usage entries for domains that have been inactive.
|
||||
func (l *Logger) cleanupStaleUsage(ctx context.Context) {
|
||||
ticker := time.NewTicker(usageCleanupPeriod)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
l.usageMux.Lock()
|
||||
now := time.Now()
|
||||
removed := 0
|
||||
for domain, usage := range l.domainUsage {
|
||||
if now.Sub(usage.lastActivity) > usageInactiveWindow {
|
||||
delete(l.domainUsage, domain)
|
||||
removed++
|
||||
}
|
||||
}
|
||||
l.usageMux.Unlock()
|
||||
|
||||
if removed > 0 {
|
||||
l.logger.Debugf("cleaned up %d stale domain usage entries", removed)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -32,6 +32,14 @@ func (l *Logger) Middleware(next http.Handler) http.Handler {
|
||||
status: http.StatusOK,
|
||||
}
|
||||
|
||||
var bytesRead int64
|
||||
if r.Body != nil {
|
||||
r.Body = &bodyCounter{
|
||||
ReadCloser: r.Body,
|
||||
bytesRead: &bytesRead,
|
||||
}
|
||||
}
|
||||
|
||||
// Resolve the source IP using trusted proxy configuration before passing
|
||||
// the request on, as the proxy will modify forwarding headers.
|
||||
sourceIp := extractSourceIP(r, l.trustedProxies)
|
||||
@@ -53,6 +61,9 @@ func (l *Logger) Middleware(next http.Handler) http.Handler {
|
||||
host = r.Host
|
||||
}
|
||||
|
||||
bytesUpload := bytesRead
|
||||
bytesDownload := sw.bytesWritten
|
||||
|
||||
entry := logEntry{
|
||||
ID: requestID,
|
||||
ServiceId: capturedData.GetServiceId(),
|
||||
@@ -66,10 +77,15 @@ func (l *Logger) Middleware(next http.Handler) http.Handler {
|
||||
AuthMechanism: capturedData.GetAuthMethod(),
|
||||
UserId: capturedData.GetUserID(),
|
||||
AuthSuccess: sw.status != http.StatusUnauthorized && sw.status != http.StatusForbidden,
|
||||
BytesUpload: bytesUpload,
|
||||
BytesDownload: bytesDownload,
|
||||
}
|
||||
l.logger.Debugf("response: request_id=%s method=%s host=%s path=%s status=%d duration=%dms source=%s origin=%s service=%s account=%s",
|
||||
requestID, r.Method, host, r.URL.Path, sw.status, duration.Milliseconds(), sourceIp, capturedData.GetOrigin(), capturedData.GetServiceId(), capturedData.GetAccountId())
|
||||
|
||||
l.log(r.Context(), entry)
|
||||
|
||||
// Track usage for cost monitoring (upload + download) by domain
|
||||
l.trackUsage(host, bytesUpload+bytesDownload)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1,18 +1,39 @@
|
||||
package accesslog
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/netbirdio/netbird/proxy/internal/responsewriter"
|
||||
)
|
||||
|
||||
// statusWriter captures the HTTP status code from WriteHeader calls.
|
||||
// statusWriter captures the HTTP status code and bytes written from responses.
|
||||
// It embeds responsewriter.PassthroughWriter which handles all the optional
|
||||
// interfaces (Hijacker, Flusher, Pusher) automatically.
|
||||
type statusWriter struct {
|
||||
*responsewriter.PassthroughWriter
|
||||
status int
|
||||
status int
|
||||
bytesWritten int64
|
||||
}
|
||||
|
||||
func (w *statusWriter) WriteHeader(status int) {
|
||||
w.status = status
|
||||
w.PassthroughWriter.WriteHeader(status)
|
||||
}
|
||||
|
||||
func (w *statusWriter) Write(b []byte) (int, error) {
|
||||
n, err := w.PassthroughWriter.Write(b)
|
||||
w.bytesWritten += int64(n)
|
||||
return n, err
|
||||
}
|
||||
|
||||
// bodyCounter wraps an io.ReadCloser and counts bytes read from the request body.
|
||||
type bodyCounter struct {
|
||||
io.ReadCloser
|
||||
bytesRead *int64
|
||||
}
|
||||
|
||||
func (bc *bodyCounter) Read(p []byte) (int, error) {
|
||||
n, err := bc.ReadCloser.Read(p)
|
||||
*bc.bytesRead += int64(n)
|
||||
return n, err
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/asn1"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"net"
|
||||
@@ -41,6 +42,10 @@ type domainInfo struct {
|
||||
err string
|
||||
}
|
||||
|
||||
type metricsRecorder interface {
|
||||
RecordCertificateIssuance(duration time.Duration)
|
||||
}
|
||||
|
||||
// Manager wraps autocert.Manager with domain tracking and cross-replica
|
||||
// coordination via a pluggable locking strategy. The locker prevents
|
||||
// duplicate ACME requests when multiple replicas share a certificate cache.
|
||||
@@ -54,12 +59,16 @@ type Manager struct {
|
||||
|
||||
certNotifier certificateNotifier
|
||||
logger *log.Logger
|
||||
metrics metricsRecorder
|
||||
}
|
||||
|
||||
// NewManager creates a new ACME certificate manager. The certDir is used
|
||||
// for caching certificates. The lockMethod controls cross-replica
|
||||
// coordination strategy (see CertLockMethod constants).
|
||||
func NewManager(certDir, acmeURL string, notifier certificateNotifier, logger *log.Logger, lockMethod CertLockMethod) *Manager {
|
||||
// eabKID and eabHMACKey are optional External Account Binding credentials
|
||||
// required for some CAs like ZeroSSL. The eabHMACKey should be the base64
|
||||
// URL-encoded string provided by the CA.
|
||||
func NewManager(certDir, acmeURL, eabKID, eabHMACKey string, notifier certificateNotifier, logger *log.Logger, lockMethod CertLockMethod, metrics metricsRecorder) *Manager {
|
||||
if logger == nil {
|
||||
logger = log.StandardLogger()
|
||||
}
|
||||
@@ -69,11 +78,28 @@ func NewManager(certDir, acmeURL string, notifier certificateNotifier, logger *l
|
||||
domains: make(map[domain.Domain]*domainInfo),
|
||||
certNotifier: notifier,
|
||||
logger: logger,
|
||||
metrics: metrics,
|
||||
}
|
||||
|
||||
var eab *acme.ExternalAccountBinding
|
||||
if eabKID != "" && eabHMACKey != "" {
|
||||
decodedKey, err := base64.RawURLEncoding.DecodeString(eabHMACKey)
|
||||
if err != nil {
|
||||
logger.Errorf("failed to decode EAB HMAC key: %v", err)
|
||||
} else {
|
||||
eab = &acme.ExternalAccountBinding{
|
||||
KID: eabKID,
|
||||
Key: decodedKey,
|
||||
}
|
||||
logger.Infof("configured External Account Binding with KID: %s", eabKID)
|
||||
}
|
||||
}
|
||||
|
||||
mgr.Manager = &autocert.Manager{
|
||||
Prompt: autocert.AcceptTOS,
|
||||
HostPolicy: mgr.hostPolicy,
|
||||
Cache: autocert.DirCache(certDir),
|
||||
Prompt: autocert.AcceptTOS,
|
||||
HostPolicy: mgr.hostPolicy,
|
||||
Cache: autocert.DirCache(certDir),
|
||||
ExternalAccountBinding: eab,
|
||||
Client: &acme.Client{
|
||||
DirectoryURL: acmeURL,
|
||||
},
|
||||
@@ -109,24 +135,45 @@ func (mgr *Manager) AddDomain(d domain.Domain, accountID, serviceID string) {
|
||||
|
||||
// prefetchCertificate proactively triggers certificate generation for a domain.
|
||||
// It acquires a distributed lock to prevent multiple replicas from issuing
|
||||
// duplicate ACME requests. The second replica will block until the first
|
||||
// finishes, then find the certificate in the cache.
|
||||
// duplicate ACME requests. If the certificate appears in the cache while waiting
|
||||
// for the lock, it cancels the wait and uses the cached certificate.
|
||||
func (mgr *Manager) prefetchCertificate(d domain.Domain) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
name := d.PunycodeString()
|
||||
|
||||
if mgr.certExistsInCache(ctx, name) {
|
||||
mgr.logger.Infof("certificate for domain %q already exists in cache before lock attempt", name)
|
||||
mgr.loadAndFinalizeCachedCert(ctx, d, name)
|
||||
return
|
||||
}
|
||||
|
||||
go mgr.pollCacheAndCancel(ctx, name, cancel)
|
||||
|
||||
// Acquire lock
|
||||
mgr.logger.Infof("acquiring cert lock for domain %q", name)
|
||||
lockStart := time.Now()
|
||||
unlock, err := mgr.locker.Lock(ctx, name)
|
||||
if err != nil {
|
||||
mgr.logger.Warnf("acquire cert lock for domain %q, proceeding without lock: %v", name, err)
|
||||
if mgr.certExistsInCache(context.Background(), name) {
|
||||
mgr.logger.Infof("certificate for domain %q appeared in cache while waiting for lock", name)
|
||||
mgr.loadAndFinalizeCachedCert(context.Background(), d, name)
|
||||
return
|
||||
}
|
||||
mgr.logger.Warnf("acquire cert lock for domain %q: %v", name, err)
|
||||
// Continue without lock
|
||||
} else {
|
||||
mgr.logger.Infof("acquired cert lock for domain %q in %s", name, time.Since(lockStart))
|
||||
defer unlock()
|
||||
}
|
||||
|
||||
if mgr.certExistsInCache(ctx, name) {
|
||||
mgr.logger.Infof("certificate for domain %q already exists in cache after lock acquisition, skipping ACME request", name)
|
||||
mgr.loadAndFinalizeCachedCert(ctx, d, name)
|
||||
return
|
||||
}
|
||||
|
||||
hello := &tls.ClientHelloInfo{
|
||||
ServerName: name,
|
||||
Conn: &dummyConn{ctx: ctx},
|
||||
@@ -136,11 +183,15 @@ func (mgr *Manager) prefetchCertificate(d domain.Domain) {
|
||||
cert, err := mgr.GetCertificate(hello)
|
||||
elapsed := time.Since(start)
|
||||
if err != nil {
|
||||
mgr.logger.Warnf("prefetch certificate for domain %q: %v", name, err)
|
||||
mgr.logger.Warnf("prefetch certificate for domain %q in %s: %v", name, elapsed.String(), err)
|
||||
mgr.setDomainState(d, domainFailed, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if mgr.metrics != nil {
|
||||
mgr.metrics.RecordCertificateIssuance(elapsed)
|
||||
}
|
||||
|
||||
mgr.setDomainState(d, domainReady, "")
|
||||
|
||||
now := time.Now()
|
||||
@@ -179,6 +230,78 @@ func (mgr *Manager) setDomainState(d domain.Domain, state domainState, errMsg st
|
||||
}
|
||||
}
|
||||
|
||||
// certExistsInCache checks if a certificate exists on the shared disk for the given domain.
|
||||
func (mgr *Manager) certExistsInCache(ctx context.Context, domain string) bool {
|
||||
if mgr.Cache == nil {
|
||||
return false
|
||||
}
|
||||
_, err := mgr.Cache.Get(ctx, domain)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// pollCacheAndCancel periodically checks if a certificate appears in the cache.
|
||||
// If found, it cancels the context to abort the lock wait.
|
||||
func (mgr *Manager) pollCacheAndCancel(ctx context.Context, domain string, cancel context.CancelFunc) {
|
||||
ticker := time.NewTicker(5 * time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
if mgr.certExistsInCache(context.Background(), domain) {
|
||||
mgr.logger.Debugf("cert detected in cache for domain %q, cancelling lock wait", domain)
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// loadAndFinalizeCachedCert loads a certificate from cache and updates domain state.
|
||||
func (mgr *Manager) loadAndFinalizeCachedCert(ctx context.Context, d domain.Domain, name string) {
|
||||
hello := &tls.ClientHelloInfo{
|
||||
ServerName: name,
|
||||
Conn: &dummyConn{ctx: ctx},
|
||||
}
|
||||
|
||||
cert, err := mgr.GetCertificate(hello)
|
||||
if err != nil {
|
||||
mgr.logger.Warnf("load cached certificate for domain %q: %v", name, err)
|
||||
mgr.setDomainState(d, domainFailed, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
mgr.setDomainState(d, domainReady, "")
|
||||
|
||||
now := time.Now()
|
||||
if cert != nil && cert.Leaf != nil {
|
||||
leaf := cert.Leaf
|
||||
mgr.logger.Infof("loaded cached certificate for domain %q: serial=%s SANs=%v notBefore=%s, notAfter=%s, now=%s",
|
||||
name,
|
||||
leaf.SerialNumber.Text(16),
|
||||
leaf.DNSNames,
|
||||
leaf.NotBefore.UTC().Format(time.RFC3339),
|
||||
leaf.NotAfter.UTC().Format(time.RFC3339),
|
||||
now.UTC().Format(time.RFC3339),
|
||||
)
|
||||
mgr.logCertificateDetails(name, leaf, now)
|
||||
} else {
|
||||
mgr.logger.Infof("loaded cached certificate for domain %q", name)
|
||||
}
|
||||
|
||||
mgr.mu.RLock()
|
||||
info := mgr.domains[d]
|
||||
mgr.mu.RUnlock()
|
||||
|
||||
if info != nil && mgr.certNotifier != nil {
|
||||
if err := mgr.certNotifier.NotifyCertificateIssued(ctx, info.accountID, info.serviceID, name); err != nil {
|
||||
mgr.logger.Warnf("notify certificate ready for domain %q: %v", name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// logCertificateDetails logs certificate validity and SCT timestamps.
|
||||
func (mgr *Manager) logCertificateDetails(domain string, cert *x509.Certificate, now time.Time) {
|
||||
if cert.NotBefore.After(now) {
|
||||
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
)
|
||||
|
||||
func TestHostPolicy(t *testing.T) {
|
||||
mgr := NewManager(t.TempDir(), "https://acme.example.com/directory", nil, nil, "")
|
||||
mgr := NewManager(t.TempDir(), "https://acme.example.com/directory", "", "", nil, nil, "", nil)
|
||||
mgr.AddDomain("example.com", "acc1", "rp1")
|
||||
|
||||
// Wait for the background prefetch goroutine to finish so the temp dir
|
||||
@@ -70,7 +70,7 @@ func TestHostPolicy(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDomainStates(t *testing.T) {
|
||||
mgr := NewManager(t.TempDir(), "https://acme.example.com/directory", nil, nil, "")
|
||||
mgr := NewManager(t.TempDir(), "https://acme.example.com/directory", "", "", nil, nil, "", nil)
|
||||
|
||||
assert.Equal(t, 0, mgr.PendingCerts(), "initially zero")
|
||||
assert.Equal(t, 0, mgr.TotalDomains(), "initially zero domains")
|
||||
|
||||
@@ -1,64 +1,106 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
|
||||
"github.com/netbirdio/netbird/proxy/internal/proxy"
|
||||
"github.com/netbirdio/netbird/proxy/internal/responsewriter"
|
||||
)
|
||||
|
||||
type Metrics struct {
|
||||
requestsTotal prometheus.Counter
|
||||
activeRequests prometheus.Gauge
|
||||
configuredDomains prometheus.Gauge
|
||||
pathsPerDomain *prometheus.GaugeVec
|
||||
requestDuration *prometheus.HistogramVec
|
||||
backendDuration *prometheus.HistogramVec
|
||||
ctx context.Context
|
||||
requestsTotal metric.Int64Counter
|
||||
activeRequests metric.Int64UpDownCounter
|
||||
configuredDomains metric.Int64UpDownCounter
|
||||
totalPaths metric.Int64UpDownCounter
|
||||
requestDuration metric.Int64Histogram
|
||||
backendDuration metric.Int64Histogram
|
||||
certificateIssueDuration metric.Int64Histogram
|
||||
|
||||
mappingsMux sync.Mutex
|
||||
mappingPaths map[string]int
|
||||
}
|
||||
|
||||
func New(reg prometheus.Registerer) *Metrics {
|
||||
promFactory := promauto.With(reg)
|
||||
return &Metrics{
|
||||
requestsTotal: promFactory.NewCounter(prometheus.CounterOpts{
|
||||
Name: "netbird_proxy_requests_total",
|
||||
Help: "Total number of requests made to the netbird proxy",
|
||||
}),
|
||||
activeRequests: promFactory.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "netbird_proxy_active_requests_count",
|
||||
Help: "Current in-flight requests handled by the netbird proxy",
|
||||
}),
|
||||
configuredDomains: promFactory.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "netbird_proxy_domains_count",
|
||||
Help: "Current number of domains configured on the netbird proxy",
|
||||
}),
|
||||
pathsPerDomain: promFactory.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "netbird_proxy_paths_count",
|
||||
Help: "Current number of paths configured on the netbird proxy labelled by domain",
|
||||
},
|
||||
[]string{"domain"},
|
||||
),
|
||||
requestDuration: promFactory.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "netbird_proxy_request_duration_seconds",
|
||||
Help: "Duration of requests made to the netbird proxy",
|
||||
Buckets: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10},
|
||||
},
|
||||
[]string{"status", "size", "method", "host", "path"},
|
||||
),
|
||||
backendDuration: promFactory.NewHistogramVec(prometheus.HistogramOpts{
|
||||
Name: "netbird_proxy_backend_duration_seconds",
|
||||
Help: "Duration of peer round trip time from the netbird proxy",
|
||||
Buckets: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10},
|
||||
},
|
||||
[]string{"status", "size", "method", "host", "path"},
|
||||
),
|
||||
func New(ctx context.Context, meter metric.Meter) (*Metrics, error) {
|
||||
requestsTotal, err := meter.Int64Counter(
|
||||
"proxy.http.request.counter",
|
||||
metric.WithUnit("1"),
|
||||
metric.WithDescription("Total number of requests made to the netbird proxy"),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
activeRequests, err := meter.Int64UpDownCounter(
|
||||
"proxy.http.active_requests",
|
||||
metric.WithUnit("1"),
|
||||
metric.WithDescription("Current in-flight requests handled by the netbird proxy"),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
configuredDomains, err := meter.Int64UpDownCounter(
|
||||
"proxy.domains.count",
|
||||
metric.WithUnit("1"),
|
||||
metric.WithDescription("Current number of domains configured on the netbird proxy"),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
totalPaths, err := meter.Int64UpDownCounter(
|
||||
"proxy.paths.count",
|
||||
metric.WithUnit("1"),
|
||||
metric.WithDescription("Total number of paths configured on the netbird proxy"),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
requestDuration, err := meter.Int64Histogram(
|
||||
"proxy.http.request.duration.ms",
|
||||
metric.WithUnit("milliseconds"),
|
||||
metric.WithDescription("Duration of requests made to the netbird proxy"),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
backendDuration, err := meter.Int64Histogram(
|
||||
"proxy.backend.duration.ms",
|
||||
metric.WithUnit("milliseconds"),
|
||||
metric.WithDescription("Duration of peer round trip time from the netbird proxy"),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
certificateIssueDuration, err := meter.Int64Histogram(
|
||||
"proxy.certificate.issue.duration.ms",
|
||||
metric.WithUnit("milliseconds"),
|
||||
metric.WithDescription("Duration of ACME certificate issuance"),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Metrics{
|
||||
ctx: ctx,
|
||||
requestsTotal: requestsTotal,
|
||||
activeRequests: activeRequests,
|
||||
configuredDomains: configuredDomains,
|
||||
totalPaths: totalPaths,
|
||||
requestDuration: requestDuration,
|
||||
backendDuration: backendDuration,
|
||||
certificateIssueDuration: certificateIssueDuration,
|
||||
mappingPaths: make(map[string]int),
|
||||
}, nil
|
||||
}
|
||||
|
||||
type responseInterceptor struct {
|
||||
@@ -80,23 +122,19 @@ func (w *responseInterceptor) Write(b []byte) (int, error) {
|
||||
|
||||
func (m *Metrics) Middleware(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
m.requestsTotal.Inc()
|
||||
m.activeRequests.Inc()
|
||||
m.requestsTotal.Add(m.ctx, 1)
|
||||
m.activeRequests.Add(m.ctx, 1)
|
||||
|
||||
interceptor := &responseInterceptor{PassthroughWriter: responsewriter.New(w)}
|
||||
|
||||
start := time.Now()
|
||||
next.ServeHTTP(interceptor, r)
|
||||
duration := time.Since(start)
|
||||
defer func() {
|
||||
duration := time.Since(start)
|
||||
m.activeRequests.Add(m.ctx, -1)
|
||||
m.requestDuration.Record(m.ctx, duration.Milliseconds())
|
||||
}()
|
||||
|
||||
m.activeRequests.Desc()
|
||||
m.requestDuration.With(prometheus.Labels{
|
||||
"status": strconv.Itoa(interceptor.status),
|
||||
"size": strconv.Itoa(interceptor.size),
|
||||
"method": r.Method,
|
||||
"host": r.Host,
|
||||
"path": r.URL.Path,
|
||||
}).Observe(duration.Seconds())
|
||||
next.ServeHTTP(interceptor, r)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -108,44 +146,52 @@ func (f roundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
|
||||
|
||||
func (m *Metrics) RoundTripper(next http.RoundTripper) http.RoundTripper {
|
||||
return roundTripperFunc(func(req *http.Request) (*http.Response, error) {
|
||||
labels := prometheus.Labels{
|
||||
"method": req.Method,
|
||||
"host": req.Host,
|
||||
// Fill potentially empty labels with default values to avoid cardinality issues.
|
||||
"path": "/",
|
||||
"status": "0",
|
||||
"size": "0",
|
||||
}
|
||||
if req.URL != nil {
|
||||
labels["path"] = req.URL.Path
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
res, err := next.RoundTrip(req)
|
||||
duration := time.Since(start)
|
||||
|
||||
// Not all labels will be available if there was an error.
|
||||
if res != nil {
|
||||
labels["status"] = strconv.Itoa(res.StatusCode)
|
||||
labels["size"] = strconv.Itoa(int(res.ContentLength))
|
||||
}
|
||||
|
||||
m.backendDuration.With(labels).Observe(duration.Seconds())
|
||||
m.backendDuration.Record(m.ctx, duration.Milliseconds())
|
||||
|
||||
return res, err
|
||||
})
|
||||
}
|
||||
|
||||
func (m *Metrics) AddMapping(mapping proxy.Mapping) {
|
||||
m.configuredDomains.Inc()
|
||||
m.pathsPerDomain.With(prometheus.Labels{
|
||||
"domain": mapping.Host,
|
||||
}).Set(float64(len(mapping.Paths)))
|
||||
m.mappingsMux.Lock()
|
||||
defer m.mappingsMux.Unlock()
|
||||
|
||||
newPathCount := len(mapping.Paths)
|
||||
oldPathCount, exists := m.mappingPaths[mapping.Host]
|
||||
|
||||
if !exists {
|
||||
m.configuredDomains.Add(m.ctx, 1)
|
||||
}
|
||||
|
||||
pathDelta := newPathCount - oldPathCount
|
||||
if pathDelta != 0 {
|
||||
m.totalPaths.Add(m.ctx, int64(pathDelta))
|
||||
}
|
||||
|
||||
m.mappingPaths[mapping.Host] = newPathCount
|
||||
}
|
||||
|
||||
func (m *Metrics) RemoveMapping(mapping proxy.Mapping) {
|
||||
m.configuredDomains.Dec()
|
||||
m.pathsPerDomain.With(prometheus.Labels{
|
||||
"domain": mapping.Host,
|
||||
}).Set(0)
|
||||
m.mappingsMux.Lock()
|
||||
defer m.mappingsMux.Unlock()
|
||||
|
||||
oldPathCount, exists := m.mappingPaths[mapping.Host]
|
||||
if !exists {
|
||||
// Nothing to remove
|
||||
return
|
||||
}
|
||||
|
||||
m.configuredDomains.Add(m.ctx, -1)
|
||||
m.totalPaths.Add(m.ctx, -int64(oldPathCount))
|
||||
|
||||
delete(m.mappingPaths, mapping.Host)
|
||||
}
|
||||
|
||||
// RecordCertificateIssuance records the duration of a certificate issuance.
|
||||
func (m *Metrics) RecordCertificateIssuance(duration time.Duration) {
|
||||
m.certificateIssueDuration.Record(m.ctx, duration.Milliseconds())
|
||||
}
|
||||
|
||||
@@ -1,13 +1,17 @@
|
||||
package metrics_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"go.opentelemetry.io/otel/exporters/prometheus"
|
||||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
|
||||
"github.com/netbirdio/netbird/proxy/internal/metrics"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
type testRoundTripper struct {
|
||||
@@ -47,7 +51,19 @@ func TestMetrics_RoundTripper(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
m := metrics.New(prometheus.NewRegistry())
|
||||
exporter, err := prometheus.New()
|
||||
if err != nil {
|
||||
t.Fatalf("create prometheus exporter: %v", err)
|
||||
}
|
||||
|
||||
provider := metric.NewMeterProvider(metric.WithReader(exporter))
|
||||
pkg := reflect.TypeOf(metrics.Metrics{}).PkgPath()
|
||||
meter := provider.Meter(pkg)
|
||||
|
||||
m, err := metrics.New(context.Background(), meter)
|
||||
if err != nil {
|
||||
t.Fatalf("create metrics: %v", err)
|
||||
}
|
||||
|
||||
for name, test := range tests {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user